Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c128
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c21
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c18
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig45
-rw-r--r--drivers/gpu/drm/amd/display/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/TODO107
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c4932
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h259
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c498
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c755
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h102
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c441
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c379
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c567
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c161
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/log_helpers.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.c397
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c307
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/Makefile27
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c3871
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c1934
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c82
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h72
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2424
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h102
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c812
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c290
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h90
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c265
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h82
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c364
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c418
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c418
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c354
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile18
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c191
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/custom_float.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c3257
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c1899
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c120
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c1626
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1684
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c359
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c2435
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c775
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c2601
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c331
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c2807
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c397
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c193
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1103
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h218
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h115
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h467
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c171
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h706
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h652
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c485
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h228
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c945
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c1383
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h145
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c827
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h137
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c620
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h218
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c209
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h631
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c268
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h238
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c1379
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h268
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c700
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h347
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c567
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.h310
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c1119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c1620
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h733
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c1463
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/Makefile23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c152
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c933
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c522
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c2991
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c1052
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c738
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c555
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c1329
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c1966
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h273
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c688
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c716
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c854
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c163
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c1283
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c1004
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c1174
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c834
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c117
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c1257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c481
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h1386
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c816
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c702
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c960
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h683
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2958
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h167
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c363
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c351
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h186
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c1468
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c1200
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h374
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h387
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h282
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dc_features.h559
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c138
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h557
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c6124
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h598
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c1763
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c392
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c1905
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h121
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile58
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c178
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c408
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c411
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c192
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c408
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h150
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c272
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c591
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c232
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c203
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h144
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c162
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/Makefile78
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c571
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h119
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c470
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c570
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h210
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c160
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c311
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c128
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c875
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c97
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine.h120
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h113
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c284
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c244
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h80
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c601
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c485
-rw-r--r--drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h122
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h166
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h182
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/compressor.h136
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h283
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/custom_float.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h481
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h635
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h85
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h112
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h175
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h289
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h130
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h183
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h304
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h197
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h392
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h172
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile48
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c430
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c289
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c303
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c356
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c170
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h85
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h193
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h96
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c124
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h39
-rw-r--r--drivers/gpu/drm/amd/display/include/audio_types.h106
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_interface.h44
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h310
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h143
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h49
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h154
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h149
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h466
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed32_32.h129
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_interface.h92
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h105
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_types.h332
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h445
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h140
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h294
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h92
-rw-r--r--drivers/gpu/drm/amd/display/include/irq_service_interface.h51
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h170
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h188
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h166
-rw-r--r--drivers/gpu/drm/amd/display/include/set_mode_types.h107
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h95
-rw-r--r--drivers/gpu/drm/amd/display/include/vector.h150
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c1483
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h167
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile1
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h1
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c3
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c9
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.h1
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c16
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c34
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c46
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h65
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c21
-rw-r--r--drivers/gpu/drm/armada/Makefile1
-rw-r--r--drivers/gpu/drm/armada/armada_trace.c1
-rw-r--r--drivers/gpu/drm/armada/armada_trace.h1
-rw-r--r--drivers/gpu/drm/ast/ast_dp501.c1
-rw-r--r--drivers/gpu/drm/ast/ast_dram_tables.h1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Makefile1
-rw-r--r--drivers/gpu/drm/bochs/bochs.h1
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h13
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c32
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c17
-rw-r--r--drivers/gpu/drm/bridge/lvds-encoder.c48
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c25
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c73
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_connector.c17
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c8
-rw-r--r--drivers/gpu/drm/drm_edid.c23
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c10
-rw-r--r--drivers/gpu/drm/drm_plane.c1
-rw-r--r--drivers/gpu/drm/drm_trace.h1
-rw-r--r--drivers/gpu/drm/drm_vblank.c11
-rw-r--r--drivers/gpu/drm/etnaviv/Makefile1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c7
-rw-r--r--drivers/gpu/drm/etnaviv/state.xml.h1
-rw-r--r--drivers/gpu/drm/etnaviv/state_3d.xml.h1
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h1
-rw-r--r--drivers/gpu/drm/exynos/Makefile1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c6
-rw-r--r--drivers/gpu/drm/fsl-dcu/Makefile1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c5
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/i2c/Makefile1
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c7
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile1
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c45
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c33
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h3
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c27
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gemfs.c3
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h1
-rw-r--r--drivers/gpu/drm/i915/i915_trace_points.c1
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c1
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c19
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c13
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c10
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c51
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c13
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/lib_sw_fence.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_object.h1
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c11
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h1
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h1
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c2
-rw-r--r--drivers/gpu/drm/lib/drm_random.c1
-rw-r--r--drivers/gpu/drm/lib/drm_random.h1
-rw-r--r--drivers/gpu/drm/mediatek/Makefile1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h1
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/cursor.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0002.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0046.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl006b.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506e.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl506f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507a.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507b.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507c.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507d.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl507e.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826e.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl826f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl906f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl9097.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/driver.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/event.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0000.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0001.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0002.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0003.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0004.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/if0005.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/ioctl.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/notify.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/object.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/unpack.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/debug.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/engine.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/enum.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/event.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/mm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/notify.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/object.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/option.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioctl.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h1
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c23
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_common.c1
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c3
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/pl111/Makefile1
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h4
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c99
-rw-r--r--drivers/gpu/drm/r128/r128_state.c6
-rw-r--r--drivers/gpu/drm/radeon/Makefile1
-rw-r--r--drivers/gpu/drm/radeon/cik.c24
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c1
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_trace_points.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.c6
-rw-r--r--drivers/gpu/drm/selftests/drm_mm_selftests.h1
-rw-r--r--drivers/gpu/drm/shmobile/Makefile1
-rw-r--r--drivers/gpu/drm/sti/Makefile1
-rw-r--r--drivers/gpu/drm/sun4i/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/Makefile1
-rw-r--r--drivers/gpu/drm/tegra/drm.c3
-rw-r--r--drivers/gpu/drm/tegra/sor.c157
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig11
-rw-r--r--drivers/gpu/drm/tilcdc/Makefile4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c269
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts72
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h25
-rw-r--r--drivers/gpu/drm/ttm/Makefile1
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c38
-rw-r--r--drivers/gpu/drm/vc4/Makefile1
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c6
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c6
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c13
-rw-r--r--drivers/gpu/drm/virtio/Makefile1
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile1
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/drm/zte/Makefile1
-rw-r--r--drivers/gpu/host1x/Makefile1
-rw-r--r--drivers/gpu/host1x/bus.c1
-rw-r--r--drivers/gpu/host1x/dev.c3
-rw-r--r--drivers/gpu/ipu-v3/Makefile1
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c3
835 files changed, 133885 insertions, 1122 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 81ff79336623..e9500844333e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 26682454a446..e8af1f5e8a79 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -41,3 +41,4 @@ config DRM_AMDGPU_GART_DEBUGFS
pages. Uses more memory for housekeeping, enable only for debugging.
source "drivers/gpu/drm/amd/acp/Kconfig"
+source "drivers/gpu/drm/amd/display/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index ef9a3b6d7b62..78d609123420 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,15 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
FULL_AMD_PATH=$(src)/..
+DISPLAY_FOLDER_NAME=display
+FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
-I$(FULL_AMD_PATH)/scheduler \
-I$(FULL_AMD_PATH)/powerplay/inc \
- -I$(FULL_AMD_PATH)/acp/include
+ -I$(FULL_AMD_PATH)/acp/include \
+ -I$(FULL_AMD_DISPLAY_PATH) \
+ -I$(FULL_AMD_DISPLAY_PATH)/include \
+ -I$(FULL_AMD_DISPLAY_PATH)/dc \
+ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm
amdgpu-y := amdgpu_drv.o
@@ -132,4 +139,13 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
+ifneq ($(CONFIG_DRM_AMD_DC),)
+
+RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
+include $(FULL_AMD_DISPLAY_PATH)/Makefile
+
+amdgpu-y += $(AMD_DISPLAY_FILES)
+
+endif
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index cbcb6a153aba..0b14b5373783 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -66,6 +66,7 @@
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
+#include "amdgpu_dm.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
@@ -101,6 +102,8 @@ extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
+extern int amdgpu_dc;
+extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
@@ -714,7 +717,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr,
- int hw_ip, int instance, int ring,
+ u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring);
/*
@@ -1535,6 +1538,7 @@ struct amdgpu_device {
/* display */
bool enable_virtual_display;
struct amdgpu_mode_info mode_info;
+ /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src pageflip_irq;
@@ -1568,18 +1572,14 @@ struct amdgpu_device {
/* sdma */
struct amdgpu_sdma sdma;
- union {
- struct {
- /* uvd */
- struct amdgpu_uvd uvd;
+ /* uvd */
+ struct amdgpu_uvd uvd;
- /* vce */
- struct amdgpu_vce vce;
- };
+ /* vce */
+ struct amdgpu_vce vce;
- /* vcn */
- struct amdgpu_vcn vcn;
- };
+ /* vcn */
+ struct amdgpu_vcn vcn;
/* firmwares */
struct amdgpu_firmware firmware;
@@ -1590,6 +1590,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* display related functionality */
+ struct amdgpu_display_manager dm;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
int num_ip_blocks;
struct mutex mn_lock;
@@ -1653,6 +1656,9 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+
/*
* Registers read & write functions.
*/
@@ -1911,5 +1917,11 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
+#if defined(CONFIG_DRM_AMD_DC)
+int amdgpu_dm_display_resume(struct amdgpu_device *adev );
+#else
+static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
+#endif
+
#include "amdgpu_object.h"
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 47d1c132ac40..1e3e9be7d77e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -379,29 +379,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
+ unsigned long end_jiffies;
uint32_t sdma_base_addr;
+ uint32_t data;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
- m->sdma_rlc_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
- m->sdma_rlc_rb_base);
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+ usleep_range(500, 1000);
+ }
+ if (m->sdma_engine_id) {
+ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+ } else {
+ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+ }
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+ m->sdma_rlc_doorbell);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdma_rlc_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
-
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl);
@@ -574,9 +595,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
return 0;
}
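Aside: the reworked load path above gates all register programming on the RLC queue first reporting idle, using the standard jiffies-deadline polling idiom. A minimal sketch of that idiom, with a hypothetical helper name (not part of this patch):

/* Illustrative only: poll a status register until an idle bit is set,
 * giving up after roughly two seconds.
 */
static int poll_for_idle(struct amdgpu_device *adev, u32 reg, u32 idle_mask)
{
	unsigned long end_jiffies = msecs_to_jiffies(2000) + jiffies;

	while (!(RREG32(reg) & idle_mask)) {
		if (time_after(jiffies, end_jiffies))
			return -ETIME;		/* hardware never went idle */
		usleep_range(500, 1000);	/* back off between reads */
	}
	return 0;
}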
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index c21adf60a7f2..057e1ecd83ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size)
return false;
}
- tmp = bios[0x18] | (bios[0x19] << 8);
- if (bios[tmp + 0x14] != 0x0) {
- DRM_INFO("Not an x86 BIOS ROM\n");
- return false;
- }
-
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
DRM_INFO("Can't locate bios header\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a7afe553e0a1..f2b72c7c6857 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -911,10 +911,6 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
struct cgs_display_info *info)
{
CGS_FUNC_ADEV;
- struct amdgpu_crtc *amdgpu_crtc;
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- uint32_t line_time_us, vblank_lines;
struct cgs_mode_info *mode_info;
if (info == NULL)
@@ -928,30 +924,43 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
mode_info->ref_clock = adev->clock.spll.reference_freq;
}
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
- info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
- info->display_count++;
- }
- if (mode_info != NULL &&
- crtc->enabled && amdgpu_crtc->enabled &&
- amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- mode_info->vblank_time_us = vblank_lines * line_time_us;
- mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- mode_info->ref_clock = adev->clock.spll.reference_freq;
- mode_info = NULL;
+ if (!amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct drm_device *ddev = adev->ddev;
+ struct drm_crtc *crtc;
+ uint32_t line_time_us, vblank_lines;
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (crtc->enabled) {
+ info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
+ info->display_count++;
+ }
+ if (mode_info != NULL &&
+ crtc->enabled && amdgpu_crtc->enabled &&
+ amdgpu_crtc->hw_mode.clock) {
+ line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+ amdgpu_crtc->hw_mode.clock;
+ vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+ amdgpu_crtc->hw_mode.crtc_vdisplay +
+ (amdgpu_crtc->v_border * 2);
+ mode_info->vblank_time_us = vblank_lines * line_time_us;
+ mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ mode_info = NULL;
+ }
}
}
+ } else {
+ info->display_count = adev->pm.pm_display_cfg.num_display;
+ if (mode_info != NULL) {
+ mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time;
+ mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh;
+ mode_info->ref_clock = adev->clock.spll.reference_freq;
+ }
}
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index f7fceb63413c..57abf7abd7a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -409,6 +409,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
if (candidate->robj == validated)
break;
+ /* We can't move pinned BOs here */
+ if (bo->pin_count)
+ continue;
+
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
/* Check if this BO is in one of the domains we need space for */
@@ -562,8 +566,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
* invalidated it. Free it and try again
*/
release_pages(e->user_pages,
- bo->tbo.ttm->num_pages,
- false);
+ bo->tbo.ttm->num_pages);
kvfree(e->user_pages);
e->user_pages = NULL;
}
@@ -694,8 +697,7 @@ error_free_pages:
continue;
release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
- false);
+ e->robj->tbo.ttm->num_pages);
kvfree(e->user_pages);
}
}
@@ -1497,8 +1499,11 @@ out:
memset(wait, 0, sizeof(*wait));
wait->out.status = (r > 0);
wait->out.first_signaled = first;
- /* set return value 0 to indicate success */
- r = array[first]->error;
+
+ if (first < fence_count && array[first])
+ r = array[first]->error;
+ else
+ r = 0;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index efcacb827de7..3573ecdb06ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -31,6 +31,7 @@
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
@@ -2046,6 +2047,52 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
}
}
+bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+{
+ switch (asic_type) {
+#if defined(CONFIG_DRM_AMD_DC)
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
+ return amdgpu_dc != 0;
+#endif
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ return amdgpu_dc > 0;
+ case CHIP_VEGA10:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+#endif
+ return amdgpu_dc != 0;
+#endif
+ default:
+ return false;
+ }
+}
+
+/**
+ * amdgpu_device_has_dc_support - check if dc is supported
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true for supported, false for not supported
+ */
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return amdgpu_device_asic_has_dc_support(adev->asic_type);
+}
+
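These two helpers are the single switch between the legacy DCE modesetting code and the new DM/DC stack; the per-ASIC set_ip_blocks() changes later in this patch (cik.c, vi.c, soc15.c) all follow roughly this pattern (dce_v8_2_ip_block shown as one example of a legacy display block):

	if (adev->enable_virtual_display)
		amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
	else if (amdgpu_device_has_dc_support(adev))
		amdgpu_ip_block_add(adev, &dm_ip_block);	/* new DC/DM display stack */
#endif
	else
		amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);	/* legacy DCE path */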
/**
* amdgpu_device_init - initialize the driver
*
@@ -2100,7 +2147,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
-
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -2242,7 +2288,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
/* init i2c buses */
- amdgpu_atombios_i2c_init(adev);
+ if (!amdgpu_device_has_dc_support(adev))
+ amdgpu_atombios_i2c_init(adev);
}
/* Fence driver */
@@ -2378,7 +2425,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->accel_working = false;
cancel_delayed_work_sync(&adev->late_init_work);
/* free i2c buses */
- amdgpu_i2c_fini(adev);
+ if (!amdgpu_device_has_dc_support(adev))
+ amdgpu_i2c_fini(adev);
amdgpu_atombios_fini(adev);
kfree(adev->bios);
adev->bios = NULL;
@@ -2429,12 +2477,14 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
- /* turn off display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ }
+ drm_modeset_unlock_all(dev);
}
- drm_modeset_unlock_all(dev);
amdgpu_amdkfd_suspend(adev);
@@ -2577,13 +2627,25 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
/* blat the mode back in */
if (fbcon) {
- drm_helper_resume_force_mode(dev);
- /* turn on display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* pre DCE11 */
+ drm_helper_resume_force_mode(dev);
+
+ /* turn on display hw */
+ drm_modeset_lock_all(dev);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ drm_modeset_unlock_all(dev);
+ } else {
+ /*
+ * There is no equivalent atomic helper to turn on the
+ * display, so we define our own function for this;
+ * once suspend/resume is supported by the atomic
+ * framework this will be reworked.
+ */
+ amdgpu_dm_display_resume(adev);
}
- drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
@@ -2600,7 +2662,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth++;
#endif
- drm_helper_hpd_irq_event(dev);
+ if (!amdgpu_device_has_dc_support(adev))
+ drm_helper_hpd_irq_event(dev);
+ else
+ drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
@@ -2900,6 +2965,7 @@ give_up_reset:
*/
int amdgpu_gpu_reset(struct amdgpu_device *adev)
{
+ struct drm_atomic_state *state = NULL;
int i, r;
int resched;
bool need_full_reset, vram_lost = false;
@@ -2913,6 +2979,9 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ /* store modesetting */
+ if (amdgpu_device_has_dc_support(adev))
+ state = drm_atomic_helper_suspend(adev->ddev);
/* block scheduler */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3029,7 +3098,11 @@ out:
}
}
- drm_helper_resume_force_mode(adev->ddev);
+ if (amdgpu_device_has_dc_support(adev)) {
+ r = drm_atomic_helper_resume(adev->ddev, state);
+ amdgpu_dm_display_resume(adev);
+ } else
+ drm_helper_resume_force_mode(adev->ddev);
ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
if (r) {
@@ -3188,9 +3261,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3264,9 +3337,9 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
@@ -3614,12 +3687,12 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0x7F);
- se = ((*pos >> 7) & 0xFF);
- sh = ((*pos >> 15) & 0xFF);
- cu = ((*pos >> 23) & 0xFF);
- wave = ((*pos >> 31) & 0xFF);
- simd = ((*pos >> 37) & 0xFF);
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
@@ -3664,14 +3737,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
return -EINVAL;
/* decode offset */
- offset = (*pos & 0xFFF); /* in dwords */
- se = ((*pos >> 12) & 0xFF);
- sh = ((*pos >> 20) & 0xFF);
- cu = ((*pos >> 28) & 0xFF);
- wave = ((*pos >> 36) & 0xFF);
- simd = ((*pos >> 44) & 0xFF);
- thread = ((*pos >> 52) & 0xFF);
- bank = ((*pos >> 60) & 1);
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
if (!data)
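For reference, GENMASK_ULL(h, l) builds a 64-bit mask with bits l through h set, so a decode such as the SE bank field above (bits 33:24 of the file offset) is the same value in both forms, only with the field width made explicit:

	se_bank = (*pos >> 24) & 0x3FF;                  /* old form */
	se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;    /* new form, width is explicit */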
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6ad243293a78..138beb550a58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -518,7 +518,7 @@ amdgpu_framebuffer_init(struct drm_device *dev,
return 0;
}
-static struct drm_framebuffer *
+struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -556,7 +556,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return &amdgpu_fb->base;
}
-static void amdgpu_output_poll_changed(struct drm_device *dev)
+void amdgpu_output_poll_changed(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
amdgpu_fb_output_poll_changed(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
new file mode 100644
index 000000000000..3cc0ef0c055e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DISPLAY_H__
+#define __AMDGPU_DISPLAY_H__
+
+struct drm_framebuffer *
+amdgpu_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+
+void amdgpu_output_poll_changed(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 7279fb5c3abc..56caaeee6fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -433,7 +433,7 @@ struct amdgpu_pm {
uint32_t fw_version;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
+ struct amd_pp_display_configuration pm_display_cfg;/* set by dc */
};
#define R600_SSTU_DFLT 0
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index dd2f060d62a8..c2f414ffb2cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -106,6 +106,8 @@ int amdgpu_vm_debug = 0;
int amdgpu_vram_page_split = 512;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support = 0;
+int amdgpu_dc = -1;
+int amdgpu_dc_log = 0;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
@@ -211,6 +213,12 @@ module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
+module_param_named(dc, amdgpu_dc, int, 0444);
+
+MODULE_PARM_DESC(dc, "Display Core Log Level (0 = minimal (default), 1 = chatty");
+module_param_named(dc_log, amdgpu_dc_log, int, 0444);
+
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
@@ -518,17 +526,17 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
/* Vega 10 */
- {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Raven */
- {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 562930b17a6d..90fa8e8bc6fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -42,11 +42,6 @@
this contains a helper + an amdgpu fb
the helper contains a pointer to the amdgpu framebuffer base class.
*/
-struct amdgpu_fbdev {
- struct drm_fb_helper helper;
- struct amdgpu_framebuffer rfb;
- struct amdgpu_device *adev;
-};
static int
amdgpufb_open(struct fb_info *info, int user)
@@ -353,7 +348,8 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(adev->ddev);
+ if (!amdgpu_device_has_dc_support(adev))
+ drm_helper_disable_unused_functions(adev->ddev);
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index fb9f88ef6059..2fa95aef74d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -268,9 +268,10 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
*
* Checks for fence activity.
*/
-static void amdgpu_fence_fallback(unsigned long arg)
+static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = (void *)arg;
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
amdgpu_fence_process(ring);
}
@@ -286,7 +287,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+ uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
struct dma_fence *fence, **ptr;
int r;
@@ -350,7 +351,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
- emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+ emitted += READ_ONCE(ring->fence_drv.sync_seq);
return lower_32_bits(emitted);
}
@@ -422,8 +423,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
- setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
- (unsigned long)ring);
+ timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
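The fallback-timer change follows the new kernel timer API: the callback receives the struct timer_list pointer and maps it back to its container with from_timer() (a container_of() wrapper) instead of casting an unsigned long cookie. A generic sketch of the pattern, with hypothetical names:

#include <linux/timer.h>

struct my_ctx {
	struct timer_list timer;
	int expirations;
};

static void my_timeout(struct timer_list *t)
{
	/* recover the enclosing structure from the timer_list member */
	struct my_ctx *ctx = from_timer(ctx, t, timer);

	ctx->expirations++;
}

/* set up once: timer_setup(&ctx->timer, my_timeout, 0); */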
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index fb72edc4c026..e87eedcc0da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -63,6 +63,11 @@ retry:
flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
@@ -323,7 +328,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
if (r)
- goto unlock_mmap_sem;
+ goto release_object;
r = amdgpu_bo_reserve(bo, true);
if (r)
@@ -346,10 +351,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
return 0;
free_pages:
- release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
-
-unlock_mmap_sem:
- up_read(&current->mm->mmap_sem);
+ release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
release_object:
drm_gem_object_put_unlocked(gobj);
@@ -556,9 +558,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
dev_err(&dev->pdev->dev,
- "va_address 0x%lX is in reserved area 0x%X\n",
- (unsigned long)args->va_address,
- AMDGPU_VA_RESERVED_SIZE);
+ "va_address 0x%LX is in reserved area 0x%LX\n",
+ args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
}
@@ -786,11 +787,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
- offset = ACCESS_ONCE(bo->tbo.mem.start);
+ offset = READ_ONCE(bo->tbo.mem.start);
if (offset != AMDGPU_BO_INVALID_OFFSET)
seq_printf(m, " @ 0x%010Lx", offset);
- pin_count = ACCESS_ONCE(bo->pin_count);
+ pin_count = READ_ONCE(bo->pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 33535d347734..00e0ce10862f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -71,12 +71,6 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 538e5f27d120..47c5ce9807db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -37,6 +37,10 @@
#include <linux/pm_runtime.h>
+#ifdef CONFIG_DRM_AMD_DC
+#include "amdgpu_dm_irq.h"
+#endif
+
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
/*
@@ -221,15 +225,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
- if (!adev->enable_virtual_display)
- /* Disable vblank irqs aggressively for power-saving */
- adev->ddev->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
- if (r) {
- return r;
- }
-
/* enable msi */
adev->irq.msi_enabled = false;
@@ -241,7 +236,21 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
- INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+ if (!amdgpu_device_has_dc_support(adev)) {
+ if (!adev->enable_virtual_display)
+ /* Disable vblank irqs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev->ddev->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ /* pre DCE11 */
+ INIT_WORK(&adev->hotplug_work,
+ amdgpu_hotplug_work_func);
+ }
+
INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
adev->irq.installed = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 6f0b26dae3b0..720139e182a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -1030,7 +1030,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 2af2678ddaf6..ffde1e9666e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,11 +38,15 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
#include "amdgpu_irq.h"
+#include <drm/drm_dp_mst_helper.h>
+#include "modules/inc/mod_freesync.h"
+
struct amdgpu_bo;
struct amdgpu_device;
struct amdgpu_encoder;
@@ -53,9 +57,13 @@ struct amdgpu_hpd;
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
+#define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base)
+
+#define to_dm_plane_state(x)	container_of(x, struct dm_plane_state, base)
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
+#define AMDGPU_MAX_PLANES 6
#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
@@ -292,6 +300,30 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
+ /* used to enter or exit freesync mode */
+ int (*notify_freesync)(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+ /* used to allow enabling of freesync mode */
+ int (*set_freesync_property)(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val);
+
+
+};
+
+struct amdgpu_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+
+ /* caching for later use */
+ uint64_t address;
+};
+
+struct amdgpu_fbdev {
+ struct drm_fb_helper helper;
+ struct amdgpu_framebuffer rfb;
+ struct list_head fbdev_list;
+ struct amdgpu_device *adev;
};
struct amdgpu_mode_info {
@@ -299,6 +331,7 @@ struct amdgpu_mode_info {
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
@@ -328,6 +361,7 @@ struct amdgpu_mode_info {
int num_dig; /* number of dig blocks */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
+ const enum drm_plane_type *plane_type;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
@@ -400,6 +434,14 @@ struct amdgpu_crtc {
/* for virtual dce */
struct hrtimer vblank_timer;
enum amdgpu_interrupt_state vsync_timer_enabled;
+
+ int otg_inst;
+ struct drm_pending_vblank_event *event;
+};
+
+struct amdgpu_plane {
+ struct drm_plane base;
+ enum drm_plane_type plane_type;
};
struct amdgpu_encoder_atom_dig {
@@ -489,6 +531,19 @@ enum amdgpu_connector_dither {
AMDGPU_FMT_DITHER_ENABLE = 1,
};
+struct amdgpu_dm_dp_aux {
+ struct drm_dp_aux aux;
+ struct ddc_service *ddc_service;
+};
+
+struct amdgpu_i2c_adapter {
+ struct i2c_adapter base;
+
+ struct ddc_service *ddc_service;
+};
+
+#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
+
struct amdgpu_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -500,6 +555,14 @@ struct amdgpu_connector {
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
struct edid *edid;
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ struct dc_sink *dc_sink;
+ struct dc_link *dc_link;
+ struct dc_sink *dc_em_sink;
+ const struct dc_stream *stream;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
@@ -510,11 +573,39 @@ struct amdgpu_connector {
enum amdgpu_connector_audio audio;
enum amdgpu_connector_dither dither;
unsigned pixelclock_for_modeset;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ struct amdgpu_encoder *mst_encoder;
+ struct semaphore mst_sem;
+
+ /* TODO see if we can merge with ddc_bus or make a dm_connector */
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Monitor range limits */
+ int min_vfreq;
+ int max_vfreq;
+ int pixel_clock_mhz;
+
+ /* freesync caps */
+ struct mod_freesync_caps caps;
+
+ struct mutex hpd_lock;
+
};
-struct amdgpu_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
+/* TODO: start using this struct and remove the duplicated fields from the base struct above */
+struct amdgpu_mst_connector {
+ struct amdgpu_connector base;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_connector *mst_port;
+ bool is_mst_connector;
+ struct amdgpu_encoder *mst_encoder;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index a59e04f3eeba..6c570d4e4516 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -946,6 +946,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
+ /* no skipping for powerplay */
+ if (adev->powerplay.cgs_device)
+ return effective_mode;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
@@ -1466,7 +1470,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
+ if (amdgpu_crtc->enabled) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
adev->pm.dpm.new_active_crtc_count++;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 90af8e82b16a..ae9c106979d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -169,10 +169,14 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
int flags)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct dma_buf *buf;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
- return drm_gem_prime_export(dev, gobj, flags);
+ buf = drm_gem_prime_export(dev, gobj, flags);
+ if (!IS_ERR(buf))
+ buf->file->f_mapping = dev->anon_inode->i_mapping;
+ return buf;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index 190e28cb827e..93d86619e802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -63,7 +63,7 @@ static int amdgpu_update_cached_map(struct amdgpu_queue_mapper *mapper,
static int amdgpu_identity_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int ring,
+ u32 ring,
struct amdgpu_ring **out_ring)
{
switch (mapper->hw_ip) {
@@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int user_ring, bool lru_pipe_order,
+ u32 user_ring, bool lru_pipe_order,
struct amdgpu_ring **out_ring)
{
int r, i, j;
@@ -208,7 +208,7 @@ int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
*/
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr,
- int hw_ip, int instance, int ring,
+ u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring)
{
int r, ip_num_rings;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 213988f336ed..f337c316ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
index 9ec96b9e85d1..b160b958e5fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright Red Hat Inc 2010.
*
* Permission is hereby granted, free of charge, to any person obtaining a
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 1f036af85ba6..ad5bf86ee8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -746,7 +746,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
return 0;
release_pages:
- release_pages(pages, pinned, 0);
+ release_pages(pages, pinned);
up_read(&current->mm->mmap_sem);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index aa914256b4bc..bae77353447b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -94,7 +94,8 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_MMHUB 1
/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20)
+
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 26e900627971..4acca92f6a52 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -68,11 +68,6 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 567c4a5cf90c..a296f7bbe57c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -65,6 +65,7 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
#include "dce_virtual.h"
@@ -1022,22 +1023,101 @@ static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] =
{mmPA_SC_RASTER_CONFIG_1, true},
};
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num,
- u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
{
- uint32_t val;
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1047,13 +1127,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+ bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
if (reg_offset != cik_allowed_read_registers[i].reg_offset)
continue;
- *value = cik_allowed_read_registers[i].grbm_indexed ?
- cik_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = cik_get_register_value(adev, indexed, se_num, sh_num,
+ reg_offset);
return 0;
}
return -EINVAL;
@@ -1900,6 +1980,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
@@ -1914,6 +1998,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
@@ -1928,6 +2016,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
@@ -1943,6 +2035,10 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 00868764a0dd..419ba0ce7ee5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1819,6 +1819,22 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
}
+
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ adev->gfx.config.rb_config[i][j].raster_config_1 =
+ RREG32(mmPA_SC_RASTER_CONFIG_1);
+ }
+ }
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
@@ -4670,6 +4686,14 @@ static int gfx_v7_0_sw_fini(void *handle)
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->gfx.rlc.cp_table_size) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v7_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b8002ac3e536..9ecdf621a74a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2118,6 +2118,15 @@ static int gfx_v8_0_sw_fini(void *handle)
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v8_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7f15bb2c5233..da43813d67a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -207,6 +207,12 @@ static const u32 golden_settings_gc_9_1_rv1[] =
SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
+static const u32 golden_settings_gc_9_x_common[] =
+{
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+};
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -242,6 +248,9 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
default:
break;
}
+
+ amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
@@ -988,12 +997,22 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t start, uint32_t size,
+ uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, thread,
+ start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
+}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -1449,6 +1468,14 @@ static int gfx_v9_0_sw_fini(void *handle)
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->asic_type == CHIP_RAVEN) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v9_0_free_microcode(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 621699331e09..c8f1aebeac7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -392,7 +392,16 @@ static int gmc_v9_0_early_init(void *handle)
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+ /*
+ * The latest engine allocation on gfx9 is:
+ * Engine 0, 1: idle
+ * Engine 2, 3: firmware
+ * Engine 4~13: amdgpu ring, subject to change when ring number changes
+ * Engine 14~15: idle
+ * Engine 16: kfd tlb invalidation
+ * Engine 17: Gart flushes
+ */
+ unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
unsigned i;
for(i = 0; i < adev->num_rings; ++i) {
@@ -405,9 +414,9 @@ static int gmc_v9_0_late_init(void *handle)
ring->funcs->vmhub);
}
- /* Engine 17 is used for GART flushes */
+ /* Engine 16 is used for KFD and 17 for GART flushes */
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 17);
+ BUG_ON(vm_inv_eng[i] > 16);
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
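Given the allocation table in the comment above, the surrounding loop hands each ring its invalidation engine sequentially per VM hub, starting at engine 4; simplified from the function shown here:

	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };

	for (i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		/* next free engine on this ring's hub (GFXHUB or MMHUB) */
		ring->vm_inv_eng = vm_inv_eng[ring->funcs->vmhub]++;
	}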
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 3ca9d114f630..4e67fe1e7955 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -532,6 +532,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &uvd_v7_0_ip_block);
@@ -545,6 +551,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#else
+# warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
+#endif
amdgpu_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_ip_block_add(adev, &sdma_v4_0_ip_block);
amdgpu_ip_block_add(adev, &vcn_v1_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 2581543b35a7..920910ac8663 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -371,6 +371,10 @@ static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!(adev->flags & AMD_IS_APU) &&
+ (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
+ return -ENOENT;
+
uvd_v6_0_set_ring_funcs(adev);
if (uvd_v6_0_enc_support(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 90332f55cfba..cf81065e3c5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -365,15 +365,10 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;
- /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
if ((adev->asic_type == CHIP_FIJI) ||
- (adev->asic_type == CHIP_STONEY) ||
- (adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12))
+ (adev->asic_type == CHIP_STONEY))
return AMDGPU_VCE_HARVEST_VCE1;
- /* Tonga and CZ are dual or single pipe */
if (adev->flags & AMD_IS_APU)
tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
VCE_HARVEST_FUSE_MACRO__MASK) >>
@@ -391,6 +386,11 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
case 3:
return AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
default:
+ if ((adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12))
+ return AMDGPU_VCE_HARVEST_VCE1;
+
return 0;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 1eb4d79d6e30..0450ac5ba6b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1175,7 +1175,7 @@ static const struct amdgpu_irq_src_funcs vcn_v1_0_irq_funcs = {
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index f3cfef48aa99..3a4c2fa7e36d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -77,6 +77,7 @@
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
+#include "amdgpu_dm.h"
/*
* Indirect registers accessor
@@ -1502,6 +1503,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v10_1_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1518,6 +1523,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v10_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1536,6 +1545,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1550,6 +1563,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_0_ip_block);
@@ -1567,6 +1584,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display)
amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_ip_block_add(adev, &dm_ip_block);
+#endif
else
amdgpu_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_ip_block_add(adev, &gfx_v8_1_ip_block);
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index b400d5664252..7bb0bc0ca3d6 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Heterogeneous System Architecture support for AMD GPU devices
#
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 6c5a9cab55de..f744caeaee04 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
+#include <linux/printk.h>
#include "kfd_priv.h"
#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
@@ -132,7 +133,7 @@ static void __exit kfd_module_exit(void)
kfd_process_destroy_wq();
kfd_topology_shutdown();
kfd_chardev_exit();
- dev_info(kfd_device, "Removed module\n");
+ pr_info("amdkfd: Removed module\n");
}
module_init(kfd_module_init);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 4859d263fa2a..4728fad3fd74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -202,8 +202,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct cik_sdma_rlc_registers *m;
m = get_sdma_mqd(mqd);
- m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
- SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
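The RB_SIZE fix above subtracts one because ffs() returns a 1-based bit index, while the field evidently encodes log2 of the ring size in dwords; a worked example:

	/* queue_size = 1024 bytes  ->  1024 / sizeof(unsigned int) = 256 dwords */
	/* ffs(256)     == 9   (1-based position of the only set bit)            */
	/* ffs(256) - 1 == 8   == log2(256), the value encoded into RB_SIZE      */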
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 2bec902fc939..a3f1e62c60ba 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -191,6 +191,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
+ if (dev->dqm->queue_count >=
+ CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+ pr_err("Over-subscription is not allowed for SDMA.\n");
+ retval = -EPERM;
+ goto err_create_queue;
+ }
+
+ retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ if (retval != 0)
+ goto err_create_queue;
+ pqn->q = q;
+ pqn->kq = NULL;
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+ &q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
+ print_queue(q);
+ break;
+
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
new file mode 100644
index 000000000000..ec3285f65517
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -0,0 +1,45 @@
+menu "Display Engine Configuration"
+ depends on DRM && DRM_AMDGPU
+
+config DRM_AMD_DC
+ bool "AMD DC - Enable new display engine"
+ default y
+ help
+ Choose this option if you want to use the new display engine
+ support for AMDGPU. This adds required support for Vega and
+ Raven ASICs.
+
+config DRM_AMD_DC_PRE_VEGA
+ bool "DC support for Polaris and older ASICs"
+ default n
+ help
+ Choose this option to enable the new DC support for older ASICs
+ by default. This includes Polaris, Carrizo, Tonga, Bonaire,
+ and Hawaii.
+
+config DRM_AMD_DC_FBC
+ bool "AMD FBC - Enable Frame Buffer Compression"
+ depends on DRM_AMD_DC
+ help
+ Choose this option if you want to use frame buffer compression
+ support.
+ This is a power optimisation feature; check its availability
+ on your hardware before enabling this option.
+
+
+config DRM_AMD_DC_DCN1_0
+ bool "DCN 1.0 Raven family"
+ depends on DRM_AMD_DC && X86
+ help
+ Choose this option if you want display engine support
+ for the Raven (RV) family.
+
+config DEBUG_KERNEL_DC
+ bool "Enable kgdb break in DC"
+ depends on DRM_AMD_DC
+ help
+ Choose this option if you want to hit kgdb_break in assert.
+
+endmenu
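A bool symbol like DRM_AMD_DC above becomes a CONFIG_DRM_AMD_DC preprocessor define at build time, which is how the vi_set_ip_blocks() hunks earlier in this patch choose between the DC and legacy DCE display paths. A minimal standalone sketch of that pattern (the macro body and strings here are illustrative, not driver code):

    #include <stdio.h>

    #if defined(CONFIG_DRM_AMD_DC)
    #define DISPLAY_BACKEND "amdgpu_dm (DC)"
    #else
    #define DISPLAY_BACKEND "legacy DCE"
    #endif

    int main(void)
    {
        /* Build with -DCONFIG_DRM_AMD_DC to take the DC branch. */
        printf("display backend: %s\n", DISPLAY_BACKEND);
        return 0;
    }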
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
new file mode 100644
index 000000000000..8ba37dd9cf7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the DAL (Display Abstract Layer), which is a sub-component
+# of the AMDGPU drm driver.
+# It provides the HW control for display related functionalities.
+
+AMDDALPATH = $(RELATIVE_AMD_DISPLAY_PATH)
+
+subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/inc/hw
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
+
+#TODO: remove when Timing Sync feature is complete
+subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
+
+DAL_LIBS = amdgpu_dm dc modules/freesync
+
+AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
+
+include $(AMD_DAL)
diff --git a/drivers/gpu/drm/amd/display/TODO b/drivers/gpu/drm/amd/display/TODO
new file mode 100644
index 000000000000..46464678f2b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/TODO
@@ -0,0 +1,107 @@
+===============================================================================
+TODOs
+===============================================================================
+
+1. Base this on drm-next - WIP
+
+
+2. Cleanup commit history
+
+
+3. WIP - Drop page flip helper and use DRM's version
+
+
+4. DONE - Flatten all DC objects
+ * dc_stream/core_stream/stream should just be dc_stream
+ * Same for other DC objects
+
+ "Is there any major reason to keep all those abstractions?
+
+ Could you collapse everything into struct dc_stream?
+
+ I haven't looked recently but I didn't get the impression there was a
+ lot of design around what was public/protected, more whatever needed
+ to be used by someone else was in public."
+ ~ Dave Airlie
+
+
+5. DONE - Rename DC objects to align more with DRM
+ * dc_surface -> dc_plane_state
+ * dc_stream -> dc_stream_state
+
+
+6. DONE - Per-plane and per-stream validation
+
+
+7. WIP - Per-plane and per-stream commit
+
+
+8. WIP - Split pipe_ctx into plane and stream resource structs
+
+
+9. Attach plane and stream resources to state object instead of validate_context
+
+
+10. Remove dc_edid_caps and drm_helpers_parse_edid_caps
+ * Use drm_display_info instead
+ * Remove DC's edid quirks and rely on DRM's quirks (add quirks if needed)
+
+ "Making sure you use the sink-specific helper libraries and kernel
+ subsystems, since there's really no good reason to have 2nd
+ implementation of those in the kernel. Looks likes that's done for mst
+ and edid parsing. There's still a bit a midlayer feeling to the edid
+ parsing side (e.g. dc_edid_caps and dm_helpers_parse_edid_caps, I
+ think it'd be much better if you convert that over to reading stuff
+ from drm_display_info and if needed, push stuff into the core). Also,
+ I can't come up with a good reason why DC needs all this (except to
+ reimplement half of our edid quirk table, which really isn't a good
+ idea). Might be good if you put this onto the list of things to fix
+ long-term, but imo not a blocker. Definitely make sure new stuff
+ doesn't slip in (i.e. if you start adding edid quirks to DC instead of
+ the drm core, refactoring to use the core edid stuff was pointless)."
+ ~ Daniel Vetter
+
+
+11. Remove dc/i2caux. This folder can be somewhat misleading. It's basically an
+overly complicated HW programming function for sending and receiving i2c/aux
+commands. We can greatly simplify that and move it into dc/dceXYZ like other
+HW blocks.
+
+12. drm_modeset_lock in MST should no longer be needed in recent kernels
+ * Adopt appropriate locking scheme
+
+13. get_modes and best_encoder callbacks look a bit funny. Can probably rip out
+a few indirections, and consider removing entirely and using the
+drm_atomic_helper_best_encoder default behaviour.
+
+14. core/dc_debug.c, consider switching to the atomic state debug helpers and
+moving all your driver state printing into the various atomic_print_state
+callbacks. There's also plans to expose this stuff in a standard way across all
+drivers, to make debugging userspace compositors easier across different hw.
+
+15. Move DP/HDMI dual mode adaptors to drm_dp_dual_mode_helper.c. See
+dal_ddc_service_i2c_query_dp_dual_mode_adaptor.
+
+16. Move to core SCDC helpers (I think those are new since initial DC review).
+
+17. There's still a pretty massive layer cake around dp aux and DPCD handling,
+with like 3 levels of abstraction and using your own structures instead of the
+stuff in drm_dp_helper.h. drm_dp_helper.h isn't really great and already has 2
+incompatible styles, just means more reasons not to add a third (or well third
+one gets to do the cleanup refactor).
+
+18. There's a pile of sink handling code, both for DP and HDMI where I didn't
+immediately recognize the standard. I think long term it'd be best for the drm
+subsystem if we try to move as much of that into helpers/core as possible, and
+share it with drivers. But that's a very long term goal, and by far not just an
+issue with DC - other drivers, especially around DP sink handling, are equally
+guilty.
+
+19. The DC logger is still a rather sore thing, but I know that the DRM_DEBUG
+stuff just isn't up to the challenges either. We need to figure out something
+that integrates better with DRM and linux debug printing, while not being
+useless with filtering output. dynamic debug printing might be an option.
+
+20. Use kernel i2c device to program HDMI retimer. Some boards have an HDMI
+retimer that we need to program to pass PHY compliance. Currently that bypasses
+the i2c device and goes directly to HW. This should be changed.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
new file mode 100644
index 000000000000..4699e47aa76b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for the 'dm' sub-component of DAL.
+# It provides the control and status of dm blocks.
+
+
+
+AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o
+
+ifneq ($(CONFIG_DRM_AMD_DC),)
+AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc
+
+AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
+
+AMD_DISPLAY_FILES += $(AMDGPU_DM)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
new file mode 100644
index 000000000000..c324c3b76fac
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -0,0 +1,4932 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+#include "dc.h"
+#include "dc/inc/core_types.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_pm.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+#include "dm_services_types.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/types.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_edid.h>
+
+#include "modules/inc/mod_freesync.h"
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "ivsrcid/irqsrcs_dcn_1_0.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "soc15_common.h"
+#endif
+
+#include "modules/inc/mod_freesync.h"
+
+#include "i2caux_interface.h"
+
+/* basic init/fini API */
+static int amdgpu_dm_init(struct amdgpu_device *adev);
+static void amdgpu_dm_fini(struct amdgpu_device *adev);
+
+/* initializes drm_device display related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+/* removes and deallocates the drm structures, created by the above function */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_plane *aplane,
+ unsigned long possible_crtcs);
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ uint32_t link_index);
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *amdgpu_dm_connector,
+ uint32_t link_index,
+ struct amdgpu_encoder *amdgpu_encoder);
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index);
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock);
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+
+
+
+static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+};
+
+static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
+};
+
+static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_PRIMARY,
+ DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
+};
+
+/*
+ * dm_vblank_get_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int crtc - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else {
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+ acrtc->base.state);
+
+
+ if (acrtc_state->stream == NULL) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ return dc_stream_get_vblank_counter(acrtc_state->stream);
+ }
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ uint32_t v_blank_start, v_blank_end, h_position, v_position;
+
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+ else {
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(
+ acrtc->base.state);
+
+ if (acrtc_state->stream == NULL) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc_state->stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+ }
+
+ return 0;
+}
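dm_crtc_get_scanoutpos() above returns two packed 32-bit words: the vertical value sits in the low 16 bits and the horizontal (or blank-end) value in the high 16 bits. A standalone sketch of the packing and unpacking (the sample numbers are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t v_position = 525;                    /* illustrative values */
        uint32_t h_position = 800;
        uint32_t position   = v_position | (h_position << 16);

        printf("packed=0x%08x -> v=%u h=%u\n",
               position, position & 0xffff, position >> 16);
        return 0;
    }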
+
+static bool dm_is_idle(void *handle)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+ return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static struct amdgpu_crtc *
+get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ int otg_inst)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ /*
+ * The following check is inherited from both functions where this one is
+ * now used. It needs to be investigated why this could happen.
+ */
+ if (otg_inst == -1) {
+ WARN_ON(1);
+ return adev->mode_info.crtcs[0];
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->otg_inst == otg_inst)
+ return amdgpu_crtc;
+ }
+
+ return NULL;
+}
+
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ unsigned long flags;
+
+ amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+ /* IRQ could occur when in initial stage */
+ /* TODO: work and BO cleanup */
+ if (amdgpu_crtc == NULL) {
+ DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id,
+ amdgpu_crtc);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return;
+ }
+
+
+ /* wake up userspace */
+ if (amdgpu_crtc->event) {
+ /* Update to correct count/ts if racing with vblank irq */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->event = NULL;
+
+ } else
+ WARN_ON(1);
+
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
+ __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
+
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+}
+
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ uint8_t crtc_index = 0;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+
+ if (acrtc)
+ crtc_index = acrtc->crtc_id;
+
+ drm_handle_vblank(adev->ddev, crtc_index);
+}
+
+static int dm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void* handle);
+
+static void hotplug_notify_work_func(struct work_struct *work)
+{
+ struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work);
+ struct drm_device *dev = dm->ddev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dal_asic_id.h"
+/* Allocate memory for FBC compressed data */
+/* TODO: Dynamic allocation */
+#define AMDGPU_FBC_SIZE (3840 * 2160 * 4)
+
+static void amdgpu_dm_initialize_fbc(struct amdgpu_device *adev)
+{
+ int r;
+ struct dm_comressor_info *compressor = &adev->dm.compressor;
+
+ if (!compressor->bo_ptr) {
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_FBC_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &compressor->bo_ptr,
+ &compressor->gpu_addr, &compressor->cpu_addr);
+
+ if (r)
+ DRM_ERROR("DM: Failed to initialize fbc\n");
+ }
+
+}
+#endif
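For scale, AMDGPU_FBC_SIZE above reserves a full 4K (3840x2160) surface at 4 bytes per pixel, i.e. roughly 32 MiB of VRAM, until dynamic allocation lands (see the TODO). A quick standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long fbc_size = 3840UL * 2160UL * 4UL;

        printf("AMDGPU_FBC_SIZE = %lu bytes (%.1f MiB)\n",
               fbc_size, fbc_size / (1024.0 * 1024.0));
        return 0;
    }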
+
+
+/* Init display KMS
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+ struct dc_init_data init_data;
+ adev->dm.ddev = adev->ddev;
+ adev->dm.adev = adev;
+
+ /* Zero all the fields */
+ memset(&init_data, 0, sizeof(init_data));
+
+ /* initialize DAL's lock (for SYNC context use) */
+ spin_lock_init(&adev->dm.dal_lock);
+
+ /* initialize DAL's mutex */
+ mutex_init(&adev->dm.dal_mutex);
+
+ if(amdgpu_dm_irq_init(adev)) {
+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ goto error;
+ }
+
+ init_data.asic_id.chip_family = adev->family;
+
+ init_data.asic_id.pci_revision_id = adev->rev_id;
+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+
+ init_data.asic_id.vram_width = adev->mc.vram_width;
+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
+ init_data.asic_id.atombios_base_address =
+ adev->mode_info.atom_context->bios;
+
+ init_data.driver = adev;
+
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ goto error;
+ }
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+ adev->dm.dal = NULL;
+
+ init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+ if (amdgpu_dc_log)
+ init_data.log_mask = DC_DEFAULT_LOG_MASK;
+ else
+ init_data.log_mask = DC_MIN_LOG_MASK;
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (adev->family == FAMILY_CZ)
+ amdgpu_dm_initialize_fbc(adev);
+ init_data.fbc_gpu_addr = adev->dm.compressor.gpu_addr;
+#endif
+ /* Display Core create. */
+ adev->dm.dc = dc_create(&init_data);
+
+ if (adev->dm.dc) {
+ DRM_INFO("Display Core initialized!\n");
+ } else {
+ DRM_INFO("Display Core failed to initialize!\n");
+ goto error;
+ }
+
+ INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func);
+
+ adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+ if (!adev->dm.freesync_module) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize freesync_module.\n");
+ } else
+ DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ adev->dm.freesync_module);
+
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+ /* TODO: Add_display_info? */
+
+ /* TODO use dynamic cursor width */
+ adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+ adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+ if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+ return 0;
+error:
+ amdgpu_dm_fini(adev);
+
+ return -1;
+}
+
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+ amdgpu_dm_destroy_drm_device(&adev->dm);
+ /*
+ * TODO: pageflip, vblank interrupt
+ *
+ * amdgpu_dm_irq_fini(adev);
+ */
+
+ if (adev->dm.cgs_device) {
+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+ adev->dm.cgs_device = NULL;
+ }
+ if (adev->dm.freesync_module) {
+ mod_freesync_destroy(adev->dm.freesync_module);
+ adev->dm.freesync_module = NULL;
+ }
+ /* DC Destroy TODO: Replace destroy DAL */
+ if (adev->dm.dc)
+ dc_destroy(&adev->dm.dc);
+ return;
+}
+
+static int dm_sw_init(void *handle)
+{
+ return 0;
+}
+
+static int dm_sw_fini(void *handle)
+{
+ return 0;
+}
+
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ aconnector->mst_mgr.aux) {
+ DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to start MST\n");
+ ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
+ return ret;
+ }
+ }
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+
+static int dm_late_init(void *handle)
+{
+ struct drm_device *dev = ((struct amdgpu_device *)handle)->ddev;
+
+ return detect_mst_link_for_all_connectors(dev);
+}
+
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ !aconnector->mst_port) {
+
+ if (suspend)
+ drm_dp_mst_topology_mgr_suspend(&aconnector->mst_mgr);
+ else
+ drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr);
+ }
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static int dm_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Create DAL display manager */
+ amdgpu_dm_init(adev);
+ amdgpu_dm_hpd_init(adev);
+
+ return 0;
+}
+
+static int dm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_hpd_fini(adev);
+
+ amdgpu_dm_irq_fini(adev);
+ amdgpu_dm_fini(adev);
+ return 0;
+}
+
+static int dm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
+
+ s3_handle_mst(adev->ddev, true);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+
+ return ret;
+}
+
+static struct amdgpu_dm_connector *
+amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ uint32_t i;
+ struct drm_connector_state *new_con_state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc_from_state;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ crtc_from_state = new_con_state->crtc;
+
+ if (crtc_from_state == crtc)
+ return to_amdgpu_dm_connector(connector);
+ }
+
+ return NULL;
+}
+
+static int dm_resume(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ /* power on hardware */
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ return 0;
+}
+
+int amdgpu_dm_display_resume(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev->ddev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+
+ int ret = 0;
+ int i;
+
+ /* program HPD filter */
+ dc_resume(dm->dc);
+
+ /* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
+ /*
+ * early enable HPD Rx IRQ, should be done before set mode as short
+ * pulse interrupts are used for MST
+ */
+ amdgpu_dm_irq_resume_early(adev);
+
+ /* Do detection */
+ list_for_each_entry(connector,
+ &ddev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ /*
+ * this is the case when traversing through already created
+ * MST connectors, should be skipped
+ */
+ if (aconnector->mst_port)
+ continue;
+
+ mutex_lock(&aconnector->hpd_lock);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+ if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+ aconnector->fake_enable = false;
+
+ aconnector->dc_sink = NULL;
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ mutex_unlock(&aconnector->hpd_lock);
+ }
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i)
+ new_crtc_state->active_changed = true;
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ }
+
+ for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
+
+ adev->dm.cached_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ return ret;
+}
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+ .name = "dm",
+ .early_init = dm_early_init,
+ .late_init = dm_late_init,
+ .sw_init = dm_sw_init,
+ .sw_fini = dm_sw_fini,
+ .hw_init = dm_hw_init,
+ .hw_fini = dm_hw_fini,
+ .suspend = dm_suspend,
+ .resume = dm_resume,
+ .is_idle = dm_is_idle,
+ .wait_for_idle = dm_wait_for_idle,
+ .check_soft_reset = dm_check_soft_reset,
+ .soft_reset = dm_soft_reset,
+ .set_clockgating_state = dm_set_clockgating_state,
+ .set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+};
+
+
+static struct drm_atomic_state *
+dm_atomic_state_alloc(struct drm_device *dev)
+{
+ struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return NULL;
+
+ if (drm_atomic_state_init(dev, &state->base) < 0)
+ goto fail;
+
+ return &state->base;
+
+fail:
+ kfree(state);
+ return NULL;
+}
+
+static void
+dm_atomic_state_clear(struct drm_atomic_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state->context) {
+ dc_release_state(dm_state->context);
+ dm_state->context = NULL;
+ }
+
+ drm_atomic_state_default_clear(state);
+}
+
+static void
+dm_atomic_state_alloc_free(struct drm_atomic_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ drm_atomic_state_default_release(state);
+ kfree(dm_state);
+}
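The three callbacks above follow the usual pattern of subclassing drm_atomic_state: struct dm_atomic_state embeds the base state and adds a DC context, and to_dm_atomic_state() (defined elsewhere in the series) presumably recovers the wrapper via container_of(). A standalone sketch of that assumed pattern with stand-in type names, not the kernel definitions:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct base_state { int dummy; };                 /* stands in for drm_atomic_state */
    struct wrap_state { struct base_state base; void *context; };

    #define to_wrap_state(x) container_of((x), struct wrap_state, base)

    int main(void)
    {
        struct wrap_state s = { .context = NULL };
        struct base_state *b = &s.base;               /* what the core hands back */

        printf("wrapper recovered: %s\n",
               to_wrap_state(b) == &s ? "yes" : "no");
        return 0;
    }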
+
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ .fb_create = amdgpu_user_framebuffer_create,
+ .output_poll_changed = amdgpu_output_poll_changed,
+ .atomic_check = amdgpu_dm_atomic_check,
+ .atomic_commit = amdgpu_dm_atomic_commit,
+ .atomic_state_alloc = dm_atomic_state_alloc,
+ .atomic_state_clear = dm_atomic_state_clear,
+ .atomic_state_free = dm_atomic_state_alloc_free
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
+};
+
+static void
+amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_sink *sink;
+
+ /* MST handled by drm_mst framework */
+ if (aconnector->mst_mgr.mst_state == true)
+ return;
+
+
+ sink = aconnector->dc_link->local_sink;
+
+ /* Edid mgmt connector gets first update only in mode_valid hook and then
+ * the connector sink is set to either fake or physical sink depending on link status.
+ * Don't do it here during boot.
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+ && aconnector->dc_em_sink) {
+
+ /* For S3 resume with headless use the emulated sink to fake the stream
+ * because on resume connector->sink is set to NULL
+ */
+ mutex_lock(&dev->mode_config.mutex);
+
+ if (sink) {
+ if (aconnector->dc_sink) {
+ amdgpu_dm_remove_sink_from_freesync_module(
+ connector);
+ /* retain and release below are used to bump up the
+ * refcount for the sink because the link doesn't point
+ * to it anymore after disconnect, so on the next crtc to connector
+ * reshuffle by UMD we would get an unwanted dc_sink release
+ */
+ if (aconnector->dc_sink != aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_sink);
+ }
+ aconnector->dc_sink = sink;
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, aconnector->edid);
+ } else {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ if (!aconnector->dc_sink)
+ aconnector->dc_sink = aconnector->dc_em_sink;
+ else if (aconnector->dc_sink != aconnector->dc_em_sink)
+ dc_sink_retain(aconnector->dc_sink);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+ return;
+ }
+
+ /*
+ * TODO: temporary guard while looking for a proper fix.
+ * If this sink is an MST sink, we should not do anything.
+ */
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ return;
+
+ if (aconnector->dc_sink == sink) {
+ /* We got a DP short pulse (Link Loss, DP CTS, etc...).
+ * Do nothing!! */
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+ aconnector->connector_id, aconnector->dc_sink, sink);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /* 1. Update status of the drm connector
+ * 2. Send an event and let userspace tell us what to do */
+ if (sink) {
+ /* TODO: check if we still need the S3 mode update workaround.
+ * If yes, put it here. */
+ if (aconnector->dc_sink)
+ amdgpu_dm_remove_sink_from_freesync_module(
+ connector);
+
+ aconnector->dc_sink = sink;
+ if (sink->dc_edid.length == 0) {
+ aconnector->edid = NULL;
+ } else {
+ aconnector->edid =
+ (struct edid *) sink->dc_edid.raw_edid;
+
+
+ drm_mode_connector_update_edid_property(connector,
+ aconnector->edid);
+ }
+ amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
+
+ } else {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ drm_mode_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ aconnector->dc_sink = NULL;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+
+ /* In case of failure or MST no need to update connector status or notify the OS
+ * since (for the MST case) MST does this in its own context.
+ */
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+ mutex_unlock(&aconnector->hpd_lock);
+
+}
+
+static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+{
+ uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ uint8_t dret;
+ bool new_irq_handled = false;
+ int dpcd_addr;
+ int dpcd_bytes_to_read;
+
+ const int max_process_count = 30;
+ int process_count = 0;
+
+ const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+ if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+ dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+ /* DPCD 0x200 - 0x201 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT;
+ } else {
+ dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+ /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT_ESI;
+ }
+
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ while (dret == dpcd_bytes_to_read &&
+ process_count < max_process_count) {
+ uint8_t retry;
+ dret = 0;
+
+ process_count++;
+
+ DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+ /* handle HPD short pulse irq */
+ if (aconnector->mst_mgr.mst_state)
+ drm_dp_mst_hpd_irq(
+ &aconnector->mst_mgr,
+ esi,
+ &new_irq_handled);
+
+ if (new_irq_handled) {
+ /* ACK at DPCD to notify the downstream device */
+ const int ack_dpcd_bytes_to_write =
+ dpcd_bytes_to_read - 1;
+
+ for (retry = 0; retry < 3; retry++) {
+ uint8_t wret;
+
+ wret = drm_dp_dpcd_write(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr + 1,
+ &esi[1],
+ ack_dpcd_bytes_to_write);
+ if (wret == ack_dpcd_bytes_to_write)
+ break;
+ }
+
+ /* check if there is a new irq to be handled */
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ new_irq_handled = false;
+ } else {
+ break;
+ }
+ }
+
+ if (process_count == max_process_count)
+ DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void handle_hpd_rx_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_link *dc_link = aconnector->dc_link;
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+
+ /* TODO: Temporarily take a mutex to protect the hpd interrupt from a gpio
+ * conflict; once an i2c helper is implemented, this mutex should be
+ * retired.
+ */
+ if (dc_link->type != dc_connection_mst_branch)
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (dc_link_handle_hpd_rx_irq(dc_link, NULL) &&
+ !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+ if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_hotplug_event(dev);
+ }
+ }
+ if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+ (dc_link->type == dc_connection_mst_branch))
+ dm_handle_hpd_rx_irq(aconnector);
+
+ if (dc_link->type != dc_connection_mst_branch)
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ const struct dc_link *dc_link;
+ struct dc_interrupt_params int_params = {0};
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ dc_link = aconnector->dc_link;
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_irq,
+ (void *) aconnector);
+ }
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+
+ /* Also register for DP short pulse (hpd_rx). */
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd_rx;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+ }
+}
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY;
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_RAVEN)
+ client_id = AMDGPU_IH_CLIENTID_DCE;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /* Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling. */
+
+ /* Use VBLANK interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /* Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ * */
+
+ /* Use VSTARTUP interrupt */
+ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+ i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+#endif
+
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+ adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+ /* indicate support of immediate flip */
+ adev->ddev->mode_config.async_page_flip = true;
+
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+ if (dc_link_set_backlight_level(dm->backlight_link,
+ bd->props.brightness, 0, 0))
+ return 0;
+ else
+ return 1;
+}
+
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+};
+
+static void
+amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+{
+ char bl_name[16];
+ struct backlight_properties props = { 0 };
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+ dm->adev->ddev->primary->index);
+
+ dm->backlight_dev = backlight_device_register(bl_name,
+ dm->adev->ddev->dev,
+ dm,
+ &amdgpu_dm_backlight_ops,
+ &props);
+
+ if (IS_ERR(dm->backlight_dev))
+ DRM_ERROR("DM: Backlight registration failed!\n");
+ else
+ DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+}
+
+#endif
+
+/* In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with the DAL component
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ uint32_t i;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct amdgpu_encoder *aencoder = NULL;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ uint32_t link_cnt;
+ unsigned long possible_crtcs;
+
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize mode config\n");
+ return -1;
+ }
+
+ for (i = 0; i < dm->dc->caps.max_planes; i++) {
+ struct amdgpu_plane *plane;
+
+ plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+ mode_info->planes[i] = plane;
+
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ goto fail;
+ }
+ plane->base.type = mode_info->plane_type[i];
+
+ /*
+ * HACK: IGT tests expect that each plane can only have
+ * one possible CRTC. For now, set one CRTC for each
+ * plane that is not an underlay, but still allow multiple
+ * CRTCs for underlay planes.
+ */
+ possible_crtcs = 1 << i;
+ if (i >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
+
+ if (amdgpu_dm_plane_init(dm, mode_info->planes[i], possible_crtcs)) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ goto fail;
+ }
+ }
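The loop above encodes plane/CRTC compatibility as a bitmask: bit n of possible_crtcs set means the plane may be bound to CRTC n, so each non-underlay plane gets exactly one bit while underlays get 0xff (any CRTC). A standalone illustration of reading that mask:

    #include <stdio.h>

    int main(void)
    {
        unsigned long one_crtc = 1UL << 2;   /* plane usable on CRTC 2 only */
        unsigned long any_crtc = 0xff;       /* underlay: any of up to 8 CRTCs */

        for (int crtc = 0; crtc < 8; crtc++)
            printf("crtc %d: single-CRTC plane=%d underlay=%d\n", crtc,
                   !!(one_crtc & (1UL << crtc)),
                   !!(any_crtc & (1UL << crtc)));
        return 0;
    }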
+
+ for (i = 0; i < dm->dc->caps.max_streams; i++)
+ if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
+ goto fail;
+ }
+
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+
+ if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indexes\n",
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ continue;
+ }
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ goto fail;
+
+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+ if (!aencoder)
+ goto fail;
+
+ if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+ DRM_ERROR("KMS: Failed to initialize encoder\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+ DRM_ERROR("KMS: Failed to initialize connector\n");
+ goto fail;
+ }
+
+ if (dc_link_detect(dc_get_link_at_index(dm->dc, i),
+ DETECT_REASON_BOOT))
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ /* Software is initialized. Now we can register interrupt handlers. */
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGA10:
+ if (dce110_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ /*
+ * Temporarily disable until pplib/smu interaction is implemented
+ */
+ dm->dc->debug.disable_stutter = true;
+ break;
+#endif
+ default:
+ DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ kfree(aencoder);
+ kfree(aconnector);
+ for (i = 0; i < dm->dc->caps.max_planes; i++)
+ kfree(mode_info->planes[i]);
+ return -1;
+}
+
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+ drm_mode_config_cleanup(dm->ddev);
+ return;
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/**
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+ /* TODO: implement later */
+}
+
+static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+ u8 level)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+}
+
+static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+ return 0;
+}
+
+static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct mod_freesync_params freesync_params;
+ uint8_t num_streams;
+ uint8_t i;
+
+ struct amdgpu_device *adev = dev->dev_private;
+ int r = 0;
+
+ /* Get freesync enable flag from DRM */
+
+ num_streams = dc_get_current_stream_count(adev->dm.dc);
+
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream;
+ stream = dc_get_stream_at_index(adev->dm.dc, i);
+
+ mod_freesync_update_state(adev->dm.freesync_module,
+ &stream, 1, &freesync_params);
+ }
+
+ return r;
+}
+
+static const struct amdgpu_display_funcs dm_display_funcs = {
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+ .vblank_wait = NULL,
+ .backlight_set_level =
+ dm_set_backlight_level,/* called unconditionally */
+ .backlight_get_level =
+ dm_get_backlight_level,/* called unconditionally */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+ .page_flip_get_scanoutpos =
+ dm_crtc_get_scanoutpos,/* called unconditionally */
+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
+ .notify_freesync = amdgpu_notify_freesync,
+
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
+static ssize_t s3_debug_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ int s3_state;
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_dev->dev_private;
+
+ ret = kstrtoint(buf, 0, &s3_state);
+
+ if (ret == 0) {
+ if (s3_state) {
+ dm_resume(adev);
+ amdgpu_dm_display_resume(adev);
+ drm_kms_helper_hotplug_event(adev->ddev);
+ } else
+ dm_suspend(adev);
+ }
+
+ return ret == 0 ? count : 0;
+}
+
+DEVICE_ATTR_WO(s3_debug);
+
+#endif
+
+static int dm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ adev->ddev->driver->driver_features |= DRIVER_ATOMIC;
+ amdgpu_dm_set_irq_funcs(adev);
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_KAVERI:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_CARRIZO:
+ adev->mode_info.num_crtc = 3;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ adev->mode_info.plane_type = dm_plane_type_carizzo;
+ break;
+ case CHIP_STONEY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ adev->mode_info.plane_type = dm_plane_type_stoney;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_POLARIS10:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+ case CHIP_VEGA10:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case CHIP_RAVEN:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ adev->mode_info.plane_type = dm_plane_type_default;
+ break;
+#endif
+ default:
+ DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dm_display_funcs;
+
+ /* Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+ * amdgpu_device_init() */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+ device_create_file(
+ adev->ddev->dev,
+ &dev_attr_s3_debug);
+#endif
+
+ return 0;
+}
+
+struct dm_connector_state {
+ struct drm_connector_state base;
+
+ enum amdgpu_rmx_type scaling;
+ uint8_t underscan_vborder;
+ uint8_t underscan_hborder;
+ bool underscan_enable;
+};
+
+#define to_dm_connector_state(x)\
+ container_of((x), struct dm_connector_state, base)
+
+static bool modeset_required(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_stream_state *old_stream)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ if (!crtc_state->enable)
+ return false;
+
+ return crtc_state->active;
+}
+
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return false;
+
+ return !crtc_state->enable || !crtc_state->active;
+}
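The two helpers above reduce to simple boolean logic: a modeset is required only when the atomic core flagged one and the CRTC ends up enabled and active, while a modereset covers the flagged-but-disabled (or inactive) case. A standalone truth table using plain bools instead of the DRM state types:

    #include <stdbool.h>
    #include <stdio.h>

    static bool modeset_required(bool needs_modeset, bool enable, bool active)
    {
        return needs_modeset && enable && active;
    }

    static bool modereset_required(bool needs_modeset, bool enable, bool active)
    {
        return needs_modeset && (!enable || !active);
    }

    int main(void)
    {
        for (int n = 0; n < 2; n++)
            for (int e = 0; e < 2; e++)
                for (int a = 0; a < 2; a++)
                    printf("needs=%d enable=%d active=%d -> modeset=%d modereset=%d\n",
                           n, e, a,
                           modeset_required(n, e, a),
                           modereset_required(n, e, a));
        return 0;
    }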
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
+static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
+ struct dc_plane_state *plane_state)
+{
+ plane_state->src_rect.x = state->src_x >> 16;
+ plane_state->src_rect.y = state->src_y >> 16;
+ /* ignore the fractional part for now; we do not deal with sub-pixel coordinates :( */
+ plane_state->src_rect.width = state->src_w >> 16;
+
+ if (plane_state->src_rect.width == 0)
+ return false;
+
+ plane_state->src_rect.height = state->src_h >> 16;
+ if (plane_state->src_rect.height == 0)
+ return false;
+
+ plane_state->dst_rect.x = state->crtc_x;
+ plane_state->dst_rect.y = state->crtc_y;
+
+ if (state->crtc_w == 0)
+ return false;
+
+ plane_state->dst_rect.width = state->crtc_w;
+
+ if (state->crtc_h == 0)
+ return false;
+
+ plane_state->dst_rect.height = state->crtc_h;
+
+ plane_state->clip_rect = plane_state->dst_rect;
+
+ switch (state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_state->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_state->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_state->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_state->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_state->rotation = ROTATION_ANGLE_0;
+ break;
+ }
+
+ return true;
+}
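fill_rects_from_plane_state() above keeps only the integer part of the DRM 16.16 fixed-point source coordinates (hence the comment about ignoring the fraction). A standalone sketch of that conversion with a made-up value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t src_w_fixed = (1920u << 16) | 0x8000;   /* 1920.5 in 16.16 */
        uint32_t src_w_int   = src_w_fixed >> 16;        /* fraction dropped */

        printf("src_w: 16.16=0x%08x -> integer=%u\n", src_w_fixed, src_w_int);
        return 0;
    }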
+static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
+ uint64_t *tiling_flags,
+ uint64_t *fb_location)
+{
+ struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ int r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r)) {
+ // Don't show an error message when returning -ERESTARTSYS
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Unable to reserve buffer: %d\n", r);
+ return r;
+ }
+
+ if (fb_location)
+ *fb_location = amdgpu_bo_gpu_offset(rbo);
+
+ if (tiling_flags)
+ amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+
+ amdgpu_bo_unreserve(rbo);
+
+ return r;
+}
+
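+/*
+ * Translate the amdgpu framebuffer (pixel format, GPU address and tiling
+ * flags) into a dc_plane_state. For the semi-planar NV12/NV21 video formats
+ * the chroma plane is assumed to start immediately after a luma plane whose
+ * pitch is the width aligned up to 64 pixels, which is how the offsets below
+ * are computed.
+ */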
+static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
+ struct dc_plane_state *plane_state,
+ const struct amdgpu_framebuffer *amdgpu_fb,
+ bool addReq)
+{
+ uint64_t tiling_flags;
+ uint64_t fb_location = 0;
+ uint64_t chroma_addr = 0;
+ unsigned int awidth;
+ const struct drm_framebuffer *fb = &amdgpu_fb->base;
+ int ret = 0;
+ struct drm_format_name_buf format_name;
+
+ ret = get_fb_info(
+ amdgpu_fb,
+ &tiling_flags,
+		addReq ? &fb_location : NULL);
+
+ if (ret)
+ return ret;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ default:
+ DRM_ERROR("Unsupported screen format %s\n",
+ drm_get_format_name(fb->format->format, &format_name));
+ return -EINVAL;
+ }
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
+ plane_state->address.grph.addr.low_part = lower_32_bits(fb_location);
+ plane_state->address.grph.addr.high_part = upper_32_bits(fb_location);
+ plane_state->plane_size.grph.surface_size.x = 0;
+ plane_state->plane_size.grph.surface_size.y = 0;
+ plane_state->plane_size.grph.surface_size.width = fb->width;
+ plane_state->plane_size.grph.surface_size.height = fb->height;
+ plane_state->plane_size.grph.surface_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+ /* TODO: unhardcode */
+ plane_state->color_space = COLOR_SPACE_SRGB;
+
+ } else {
+ awidth = ALIGN(fb->width, 64);
+ plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ plane_state->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(fb_location);
+ plane_state->address.video_progressive.luma_addr.high_part
+ = upper_32_bits(fb_location);
+ chroma_addr = fb_location + (u64)(awidth * fb->height);
+ plane_state->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(chroma_addr);
+ plane_state->address.video_progressive.chroma_addr.high_part
+ = upper_32_bits(chroma_addr);
+ plane_state->plane_size.video.luma_size.x = 0;
+ plane_state->plane_size.video.luma_size.y = 0;
+ plane_state->plane_size.video.luma_size.width = awidth;
+ plane_state->plane_size.video.luma_size.height = fb->height;
+ /* TODO: unhardcode */
+ plane_state->plane_size.video.luma_pitch = awidth;
+
+ plane_state->plane_size.video.chroma_size.x = 0;
+ plane_state->plane_size.video.chroma_size.y = 0;
+ plane_state->plane_size.video.chroma_size.width = awidth;
+ plane_state->plane_size.video.chroma_size.height = fb->height;
+ plane_state->plane_size.video.chroma_pitch = awidth / 2;
+
+ /* TODO: unhardcode */
+ plane_state->color_space = COLOR_SPACE_YCBCR709;
+ }
+
+ memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
+
+ /* Fill GFX8 params */
+ if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
+ unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
+
+ bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+ bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+ mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+ tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+ num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+ /* XXX fix me for VI */
+ plane_state->tiling_info.gfx8.num_banks = num_banks;
+ plane_state->tiling_info.gfx8.array_mode =
+ DC_ARRAY_2D_TILED_THIN1;
+ plane_state->tiling_info.gfx8.tile_split = tile_split;
+ plane_state->tiling_info.gfx8.bank_width = bankw;
+ plane_state->tiling_info.gfx8.bank_height = bankh;
+ plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
+ plane_state->tiling_info.gfx8.tile_mode =
+ DC_ADDR_SURF_MICRO_TILING_DISPLAY;
+ } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
+ == DC_ARRAY_1D_TILED_THIN1) {
+ plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
+ }
+
+ plane_state->tiling_info.gfx8.pipe_config =
+ AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+
+ if (adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_RAVEN) {
+ /* Fill GFX9 params */
+ plane_state->tiling_info.gfx9.num_pipes =
+ adev->gfx.config.gb_addr_config_fields.num_pipes;
+ plane_state->tiling_info.gfx9.num_banks =
+ adev->gfx.config.gb_addr_config_fields.num_banks;
+ plane_state->tiling_info.gfx9.pipe_interleave =
+ adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
+ plane_state->tiling_info.gfx9.num_shader_engines =
+ adev->gfx.config.gb_addr_config_fields.num_se;
+ plane_state->tiling_info.gfx9.max_compressed_frags =
+ adev->gfx.config.gb_addr_config_fields.max_compress_frags;
+ plane_state->tiling_info.gfx9.num_rb_per_se =
+ adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
+ plane_state->tiling_info.gfx9.swizzle =
+ AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
+ plane_state->tiling_info.gfx9.shaderEnable = 1;
+ }
+
+ plane_state->visible = true;
+ plane_state->scaling_quality.h_taps_c = 0;
+ plane_state->scaling_quality.v_taps_c = 0;
+
+ /* is this needed? is plane_state zeroed at allocation? */
+ plane_state->scaling_quality.h_taps = 0;
+ plane_state->scaling_quality.v_taps = 0;
+ plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+ return ret;
+
+}
+
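+/*
+ * Convert the 256-entry legacy DRM gamma LUT attached to the CRTC state into
+ * a DC GAMMA_RGB_256 object and attach it to the plane state as its gamma
+ * correction.
+ */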
+static void fill_gamma_from_crtc_state(const struct drm_crtc_state *crtc_state,
+ struct dc_plane_state *plane_state)
+{
+ int i;
+ struct dc_gamma *gamma;
+ struct drm_color_lut *lut =
+ (struct drm_color_lut *) crtc_state->gamma_lut->data;
+
+ gamma = dc_create_gamma();
+
+ if (gamma == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
+ gamma->type = GAMMA_RGB_256;
+ gamma->num_entries = GAMMA_RGB_256_ENTRIES;
+ for (i = 0; i < GAMMA_RGB_256_ENTRIES; i++) {
+ gamma->entries.red[i] = dal_fixed31_32_from_int(lut[i].red);
+ gamma->entries.green[i] = dal_fixed31_32_from_int(lut[i].green);
+ gamma->entries.blue[i] = dal_fixed31_32_from_int(lut[i].blue);
+ }
+
+ plane_state->gamma_correction = gamma;
+}
+
+static int fill_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state,
+ bool addrReq)
+{
+ const struct amdgpu_framebuffer *amdgpu_fb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ const struct drm_crtc *crtc = plane_state->crtc;
+ struct dc_transfer_func *input_tf;
+ int ret = 0;
+
+ if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
+ return -EINVAL;
+
+ ret = fill_plane_attributes_from_fb(
+ crtc->dev->dev_private,
+ dc_plane_state,
+ amdgpu_fb,
+ addrReq);
+
+ if (ret)
+ return ret;
+
+ input_tf = dc_create_transfer_func();
+
+ if (input_tf == NULL)
+ return -ENOMEM;
+
+ input_tf->type = TF_TYPE_PREDEFINED;
+ input_tf->tf = TRANSFER_FUNCTION_SRGB;
+
+ dc_plane_state->in_transfer_func = input_tf;
+
+ /* In case of gamma set, update gamma value */
+ if (crtc_state->gamma_lut)
+ fill_gamma_from_crtc_state(crtc_state, dc_plane_state);
+
+ return ret;
+}
+
+/*****************************************************************************/
+
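+/*
+ * Compute the stream's source (viewport) and destination rectangles from the
+ * requested mode and the connector's scaling property: RMX_ASPECT/RMX_OFF
+ * letterbox or pillarbox to preserve the aspect ratio, RMX_CENTER keeps the
+ * source size centered, anything else scales to the full addressable area,
+ * and any underscan borders shrink the destination further.
+ */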
+static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ const struct dm_connector_state *dm_state,
+ struct dc_stream_state *stream)
+{
+ enum amdgpu_rmx_type rmx_type;
+
+ struct rect src = { 0 }; /* viewport in composition space*/
+ struct rect dst = { 0 }; /* stream addressable area */
+
+ /* no mode. nothing to be done */
+ if (!mode)
+ return;
+
+ /* Full screen scaling by default */
+ src.width = mode->hdisplay;
+ src.height = mode->vdisplay;
+ dst.width = stream->timing.h_addressable;
+ dst.height = stream->timing.v_addressable;
+
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
+ }
+
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
+
+ stream->src = src;
+ stream->dst = dst;
+
+ DRM_DEBUG_DRIVER("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
+
+}
+
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector)
+{
+ uint32_t bpc = connector->display_info.bpc;
+
+	/* Limit the color depth to 8 bpc for now.
+ * TODO: Still need to handle deep color
+ */
+ if (bpc > 8)
+ bpc = 8;
+
+ switch (bpc) {
+ case 0:
+		/* Temporary workaround: DRM does not parse the color depth for
+		 * EDID revisions before 1.4.
+		 * TODO: Fix EDID parsing
+ */
+ return COLOR_DEPTH_888;
+ case 6:
+ return COLOR_DEPTH_666;
+ case 8:
+ return COLOR_DEPTH_888;
+ case 10:
+ return COLOR_DEPTH_101010;
+ case 12:
+ return COLOR_DEPTH_121212;
+ case 14:
+ return COLOR_DEPTH_141414;
+ case 16:
+ return COLOR_DEPTH_161616;
+ default:
+ return COLOR_DEPTH_UNDEFINED;
+ }
+}
+
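+/*
+ * Classify the mode as 16:9 or 4:3 by cross-multiplying: for a true 16:9 mode
+ * hdisplay * 9 equals vdisplay * 16 (e.g. 1920 * 9 == 1080 * 16 == 17280), so
+ * any difference within a tolerance of 10 is treated as 16:9.
+ */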
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+ int32_t width = mode_in->crtc_hdisplay * 9;
+ int32_t height = mode_in->crtc_vdisplay * 16;
+
+ if ((width - height) < 10 && (width - height) > -10)
+ return ASPECT_RATIO_16_9;
+ else
+ return ASPECT_RATIO_4_3;
+}
+
+static enum dc_color_space
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
+{
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+ switch (dc_crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ case PIXEL_ENCODING_YCBCR444:
+ case PIXEL_ENCODING_YCBCR420:
+ {
+ /*
+		 * 27030 kHz is the separation point between HDTV and SDTV
+		 * according to the HDMI spec; use YCbCr709 above it and
+		 * YCbCr601 below it.
+ */
+ if (dc_crtc_timing->pix_clk_khz > 27030) {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ } else {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ }
+
+ }
+ break;
+ case PIXEL_ENCODING_RGB:
+ color_space = COLOR_SPACE_SRGB;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return color_space;
+}
+
+/*****************************************************************************/
+
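+/*
+ * Translate a drm_display_mode into DC CRTC timing: the front porches and
+ * sync widths are derived from the crtc_* sync/display values, YCbCr444 is
+ * used when an HDMI sink advertises it and RGB otherwise, and the output
+ * transfer function defaults to sRGB for now.
+ */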
+static void
+fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
+ const struct drm_display_mode *mode_in,
+ const struct drm_connector *connector)
+{
+ struct dc_crtc_timing *timing_out = &stream->timing;
+
+ memset(timing_out, 0, sizeof(struct dc_crtc_timing));
+
+ timing_out->h_border_left = 0;
+ timing_out->h_border_right = 0;
+ timing_out->v_border_top = 0;
+ timing_out->v_border_bottom = 0;
+ /* TODO: un-hardcode */
+
+ if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+ else
+ timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+ timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+ timing_out->display_color_depth = convert_color_depth_from_display_info(
+ connector);
+ timing_out->scan_type = SCANNING_TYPE_NODATA;
+ timing_out->hdmi_vic = 0;
+ timing_out->vic = drm_match_cea_mode(mode_in);
+
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width =
+ mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch =
+ mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch =
+ mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width =
+ mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_khz = mode_in->crtc_clock;
+ timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+
+ stream->output_color_space = get_output_color_space(timing_out);
+
+ {
+ struct dc_transfer_func *tf = dc_create_transfer_func();
+
+ tf->type = TF_TYPE_PREDEFINED;
+ tf->tf = TRANSFER_FUNCTION_SRGB;
+ stream->out_transfer_func = tf;
+ }
+}
+
+static void fill_audio_info(struct audio_info *audio_info,
+ const struct drm_connector *drm_connector,
+ const struct dc_sink *dc_sink)
+{
+ int i = 0;
+ int cea_revision = 0;
+ const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+ audio_info->manufacture_id = edid_caps->manufacturer_id;
+ audio_info->product_id = edid_caps->product_id;
+
+ cea_revision = drm_connector->display_info.cea_rev;
+
+ strncpy(audio_info->display_name,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+
+ if (cea_revision >= 3) {
+ audio_info->mode_count = edid_caps->audio_mode_count;
+
+ for (i = 0; i < audio_info->mode_count; ++i) {
+ audio_info->modes[i].format_code =
+ (enum audio_format_code)
+ (edid_caps->audio_modes[i].format_code);
+ audio_info->modes[i].channel_count =
+ edid_caps->audio_modes[i].channel_count;
+ audio_info->modes[i].sample_rates.all =
+ edid_caps->audio_modes[i].sample_rate;
+ audio_info->modes[i].sample_size =
+ edid_caps->audio_modes[i].sample_size;
+ }
+ }
+
+ audio_info->flags.all = edid_caps->speaker_flags;
+
+ /* TODO: We only check for the progressive mode, check for interlace mode too */
+ if (drm_connector->latency_present[0]) {
+ audio_info->video_latency = drm_connector->video_latency[0];
+ audio_info->audio_latency = drm_connector->audio_latency[0];
+ }
+
+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+
+}
+
+static void
+copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
+ struct drm_display_mode *dst_mode)
+{
+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+ dst_mode->crtc_clock = src_mode->crtc_clock;
+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void
+decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+		/* no scaling and no amdgpu-inserted common mode; nothing to patch */
+ }
+}
+
+static int create_fake_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink *sink = NULL;
+ struct dc_sink_init_data sink_init_data = { 0 };
+
+ sink_init_data.link = aconnector->dc_link;
+ sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DRM_ERROR("Failed to create sink!\n");
+ return -ENOMEM;
+ }
+
+ sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+ aconnector->fake_enable = true;
+
+ aconnector->dc_sink = sink;
+ aconnector->dc_link->local_sink = sink;
+
+ return 0;
+}
+
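+/*
+ * Build a dc_stream_state for this connector: create a fake sink first if no
+ * real sink is present (MST connectors are excluded for now), prefer the
+ * connector's preferred mode when deciding the CRTC timing, then fill in
+ * timing, scaling and audio information from the DRM state.
+ */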
+static struct dc_stream_state *
+create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state)
+{
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector *drm_connector;
+ struct dc_stream_state *stream = NULL;
+ struct drm_display_mode mode = *drm_mode;
+ bool native_mode_found = false;
+
+ if (aconnector == NULL) {
+ DRM_ERROR("aconnector is NULL!\n");
+ goto drm_connector_null;
+ }
+
+ if (dm_state == NULL) {
+ DRM_ERROR("dm_state is NULL!\n");
+ goto dm_state_null;
+ }
+
+ drm_connector = &aconnector->base;
+
+ if (!aconnector->dc_sink) {
+ /*
+		 * Exclude MST from creating a fake_sink.
+		 * TODO: enable the fake_sink feature for MST as well.
+ */
+ if (aconnector->mst_port)
+ goto stream_create_fail;
+
+ if (create_fake_sink(aconnector))
+ goto stream_create_fail;
+ }
+
+ stream = dc_create_stream_for_sink(aconnector->dc_sink);
+
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto stream_create_fail;
+ }
+
+ list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
+ /* Search for preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &aconnector->base.modes,
+ struct drm_display_mode,
+ head);
+
+ if (preferred_mode == NULL) {
+		/* This may not be an error: the use case is when we have no
+		 * usermode calls to reset and set the mode upon hotplug. In
+		 * that case we set the mode ourselves to restore the previous
+		 * mode, and the mode list may not be filled in yet.
+ */
+ DRM_DEBUG_DRIVER("No preferred mode found\n");
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode,
+ dm_state->scaling != RMX_OFF);
+ }
+
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base);
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ drm_connector,
+ aconnector->dc_sink);
+
+stream_create_fail:
+dm_state_null:
+drm_connector_null:
+ return stream;
+}
+
+static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static void dm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dm_crtc_state *cur = to_dm_crtc_state(state);
+
+	/* TODO: destroy dc_stream objects once the stream object is flattened */
+ if (cur->stream)
+ dc_stream_release(cur->stream);
+
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+
+ kfree(state);
+}
+
+static void dm_crtc_reset_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state;
+
+ if (crtc->state)
+ dm_crtc_destroy_state(crtc, crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (WARN_ON(!state))
+ return;
+
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+
+}
+
+static struct drm_crtc_state *
+dm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct dm_crtc_state *state, *cur;
+
+	if (WARN_ON(!crtc->state))
+		return NULL;
+
+	cur = to_dm_crtc_state(crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ if (cur->stream) {
+ state->stream = cur->stream;
+ dc_stream_retain(state->stream);
+ }
+
+	/* TODO: duplicate the dc_stream once the stream object is flattened */
+
+ return &state->base;
+}
+
+/* Implement only the options currently available for the driver */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+ .reset = dm_crtc_reset_state,
+ .destroy = amdgpu_dm_crtc_destroy,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = dm_crtc_duplicate_state,
+ .atomic_destroy_state = dm_crtc_destroy_state,
+};
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+	/* Notes:
+	 * 1. This interface is NOT called in the context of the HPD IRQ.
+	 * 2. This interface *is* called in the context of a user-mode ioctl,
+	 *    which makes it a bad place for *any* MST-related activity.
+	 */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+ !aconnector->fake_enable)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON);
+
+ return (connected ? connector_status_connected :
+ connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *connector_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dm_connector_state *dm_old_state =
+ to_dm_connector_state(connector->state);
+ struct dm_connector_state *dm_new_state =
+ to_dm_connector_state(connector_state);
+
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum amdgpu_rmx_type rmx_type;
+
+ switch (val) {
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
+ case DRM_MODE_SCALE_NONE:
+ default:
+ rmx_type = RMX_OFF;
+ break;
+ }
+
+ if (dm_old_state->scaling == rmx_type)
+ return 0;
+
+ dm_new_state->scaling = rmx_type;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ dm_new_state->underscan_hborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ dm_new_state->underscan_vborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dm_connector_state *dm_state =
+ to_dm_connector_state(state);
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (dm_state->scaling) {
+ case RMX_CENTER:
+ *val = DRM_MODE_SCALE_CENTER;
+ break;
+ case RMX_ASPECT:
+ *val = DRM_MODE_SCALE_ASPECT;
+ break;
+ case RMX_FULL:
+ *val = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ case RMX_OFF:
+ default:
+ *val = DRM_MODE_SCALE_NONE;
+ break;
+ }
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ *val = dm_state->underscan_hborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ *val = dm_state->underscan_vborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ }
+ return ret;
+}
+
+static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ const struct dc_link *link = aconnector->dc_link;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev) {
+ backlight_device_unregister(dm->backlight_dev);
+ dm->backlight_dev = NULL;
+ }
+
+ }
+#endif
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ kfree(state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (state) {
+ state->scaling = RMX_OFF;
+ state->underscan_enable = false;
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+
+ connector->state = &state->base;
+ connector->state->connector = connector;
+ }
+}
+
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ struct dm_connector_state *new_state =
+ kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+ if (new_state) {
+ __drm_atomic_helper_connector_duplicate_state(connector,
+ &new_state->base);
+ return &new_state->base;
+ }
+
+ return NULL;
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+};
+
+static struct drm_encoder *best_encoder(struct drm_connector *connector)
+{
+ int enc_id = connector->encoder_ids[0];
+ struct drm_mode_object *obj;
+ struct drm_encoder *encoder;
+
+ DRM_DEBUG_DRIVER("Finding the best encoder\n");
+
+ /* pick the encoder ids */
+ if (enc_id) {
+ obj = drm_mode_object_find(connector->dev, NULL, enc_id, DRM_MODE_OBJECT_ENCODER);
+ if (!obj) {
+ DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+ return NULL;
+ }
+ encoder = obj_to_encoder(obj);
+ return encoder;
+ }
+ DRM_ERROR("No encoder id\n");
+ return NULL;
+}
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+ struct edid *edid;
+
+ if (!aconnector->base.edid_blob_ptr ||
+ !aconnector->base.edid_blob_ptr->data) {
+		DRM_ERROR("No EDID firmware found on connector %s, forcing it OFF!\n",
+ aconnector->base.name);
+
+ aconnector->base.force = DRM_FORCE_OFF;
+ aconnector->base.override_edid = false;
+ return;
+ }
+
+ edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
+ aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force == DRM_FORCE_ON)
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink :
+ aconnector->dc_em_sink;
+}
+
+static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+	/* In case of a headless boot with the connector forced on for a DP
+	 * managed connector, these settings have to be != 0 to get an
+	 * initial modeset.
+	 */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+ }
+
+
+ aconnector->base.override_edid = true;
+ create_eml_sink(aconnector);
+}
+
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+	/* Only run this the first time mode_valid is called, to initialize
+	 * EDID management.
+	 */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+ dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto fail;
+ }
+
+ stream = dc_create_stream_for_sink(dc_sink);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+ fill_stream_properties_from_drm_display_mode(stream, mode, connector);
+
+ stream->src.width = mode->hdisplay;
+ stream->src.height = mode->vdisplay;
+ stream->dst = stream->src;
+
+ if (dc_validate_stream(adev->dm.dc, stream) == DC_OK)
+ result = MODE_OK;
+
+ dc_stream_release(stream);
+
+fail:
+ /* TODO: error handling*/
+ return result;
+}
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+ /*
+	 * If a second, bigger display is hotplugged in fbcon mode, its higher
+	 * resolution modes are filtered out by drm_mode_validate_size() and
+	 * end up missing after the user starts lightdm. So renew the mode
+	 * list in the get_modes callback instead of just returning the count.
+ */
+ .get_modes = get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .best_encoder = best_encoder
+};
+
+static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+}
+
+static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+ int ret = -EINVAL;
+
+ if (unlikely(!dm_crtc_state->stream &&
+ modeset_required(state, NULL, dm_crtc_state->stream))) {
+ WARN_ON(1);
+ return ret;
+ }
+
+ /* In some use cases, like reset, no stream is attached */
+ if (!dm_crtc_state->stream)
+ return 0;
+
+ if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
+ return 0;
+
+ return ret;
+}
+
+static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
+ .disable = dm_crtc_helper_disable,
+ .atomic_check = dm_crtc_helper_atomic_check,
+ .mode_fixup = dm_crtc_helper_mode_fixup
+};
+
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ return 0;
+}
+
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ .disable = dm_encoder_helper_disable,
+ .atomic_check = dm_encoder_helper_atomic_check
+};
+
+static void dm_drm_plane_reset(struct drm_plane *plane)
+{
+ struct dm_plane_state *amdgpu_state = NULL;
+
+ if (plane->state)
+ plane->funcs->atomic_destroy_state(plane, plane->state);
+
+ amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
+ WARN_ON(amdgpu_state == NULL);
+
+ if (amdgpu_state) {
+ plane->state = &amdgpu_state->base;
+ plane->state->plane = plane;
+ plane->state->rotation = DRM_MODE_ROTATE_0;
+ }
+}
+
+static struct drm_plane_state *
+dm_drm_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
+
+ old_dm_plane_state = to_dm_plane_state(plane->state);
+ dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
+ if (!dm_plane_state)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
+
+ if (old_dm_plane_state->dc_state) {
+ dm_plane_state->dc_state = old_dm_plane_state->dc_state;
+ dc_plane_state_retain(dm_plane_state->dc_state);
+ }
+
+ return &dm_plane_state->base;
+}
+
+void dm_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+ if (dm_plane_state->dc_state)
+ dc_plane_state_release(dm_plane_state->dc_state);
+
+ drm_atomic_helper_plane_destroy_state(plane, state);
+}
+
+static const struct drm_plane_funcs dm_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = dm_drm_plane_reset,
+ .atomic_duplicate_state = dm_drm_plane_duplicate_state,
+ .atomic_destroy_state = dm_drm_plane_destroy_state,
+};
+
+static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct amdgpu_framebuffer *afb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *rbo;
+ uint64_t chroma_addr = 0;
+ int r;
+ struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
+ unsigned int awidth;
+
+ dm_plane_state_old = to_dm_plane_state(plane->state);
+ dm_plane_state_new = to_dm_plane_state(new_state);
+
+ if (!new_state->fb) {
+ DRM_DEBUG_DRIVER("No FB bound\n");
+ return 0;
+ }
+
+ afb = to_amdgpu_framebuffer(new_state->fb);
+
+ obj = afb->obj;
+ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+
+ r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &afb->address);
+
+
+ amdgpu_bo_unreserve(rbo);
+
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ return r;
+ }
+
+ amdgpu_bo_ref(rbo);
+
+ if (dm_plane_state_new->dc_state &&
+ dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
+ struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
+ plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
+ } else {
+ awidth = ALIGN(new_state->fb->width, 64);
+ plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ plane_state->address.video_progressive.luma_addr.low_part
+ = lower_32_bits(afb->address);
+ plane_state->address.video_progressive.luma_addr.high_part
+ = upper_32_bits(afb->address);
+ chroma_addr = afb->address + (u64)(awidth * new_state->fb->height);
+ plane_state->address.video_progressive.chroma_addr.low_part
+ = lower_32_bits(chroma_addr);
+ plane_state->address.video_progressive.chroma_addr.high_part
+ = upper_32_bits(chroma_addr);
+ }
+ }
+
+	/* This is a hack for S3: the 4.9 kernel filters out cursor buffer
+	 * prepare and cleanup in drm_atomic_helper_prepare_planes and
+	 * drm_atomic_helper_cleanup_planes because the fb is not present
+	 * during S3. In the 4.10 kernel this code should be removed, and the
+	 * amdgpu_device_suspend code touching frame buffers should be avoided
+	 * for DC.
+	 */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
+
+ acrtc->cursor_bo = obj;
+ }
+ return 0;
+}
+
+static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct amdgpu_bo *rbo;
+ struct amdgpu_framebuffer *afb;
+ int r;
+
+ if (!old_state->fb)
+ return;
+
+ afb = to_amdgpu_framebuffer(old_state->fb);
+ rbo = gem_to_amdgpu_bo(afb->obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ return;
+ }
+
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unref(&rbo);
+}
+
+static int dm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct amdgpu_device *adev = plane->dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+
+ if (!dm_plane_state->dc_state)
+ return 0;
+
+ if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
+ return 0;
+
+ return -EINVAL;
+}
+
+static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
+ .prepare_fb = dm_plane_helper_prepare_fb,
+ .cleanup_fb = dm_plane_helper_cleanup_fb,
+ .atomic_check = dm_plane_atomic_check,
+};
+
+/*
+ * TODO: these are currently initialized to RGB formats only.
+ * For future use cases we should either initialize them dynamically based on
+ * plane capabilities, or initialize this array to all formats, so the internal
+ * DRM check will succeed and DC can implement the proper check.
+ */
+static const uint32_t rgb_formats[] = {
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+};
+
+static const uint32_t yuv_formats[] = {
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+};
+
+static const u32 cursor_formats[] = {
+ DRM_FORMAT_ARGB8888
+};
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_plane *aplane,
+ unsigned long possible_crtcs)
+{
+ int res = -EPERM;
+
+ switch (aplane->base.type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ aplane->base.format_default = true;
+
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ rgb_formats,
+ ARRAY_SIZE(rgb_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_OVERLAY:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ yuv_formats,
+ ARRAY_SIZE(yuv_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ res = drm_universal_plane_init(
+ dm->adev->ddev,
+ &aplane->base,
+ possible_crtcs,
+ &dm_plane_funcs,
+ cursor_formats,
+ ARRAY_SIZE(cursor_formats),
+ NULL, aplane->base.type, NULL);
+ break;
+ }
+
+ drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+
+ /* Create (reset) the plane state */
+ if (aplane->base.funcs->reset)
+ aplane->base.funcs->reset(&aplane->base);
+
+
+ return res;
+}
+
+static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ uint32_t crtc_index)
+{
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_plane *cursor_plane;
+
+ int res = -ENOMEM;
+
+ cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
+ if (!cursor_plane)
+ goto fail;
+
+ cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+
+ acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
+ if (!acrtc)
+ goto fail;
+
+ res = drm_crtc_init_with_planes(
+ dm->ddev,
+ &acrtc->base,
+ plane,
+ &cursor_plane->base,
+ &amdgpu_dm_crtc_funcs, NULL);
+
+ if (res)
+ goto fail;
+
+ drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
+
+ /* Create (reset) the plane state */
+ if (acrtc->base.funcs->reset)
+ acrtc->base.funcs->reset(&acrtc->base);
+
+ acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
+ acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
+
+ acrtc->crtc_id = crtc_index;
+ acrtc->base.enabled = false;
+
+ dm->adev->mode_info.crtcs[crtc_index] = acrtc;
+ drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
+
+ return 0;
+
+fail:
+ kfree(acrtc);
+ kfree(cursor_plane);
+ return res;
+}
+
+
+static int to_drm_connector_type(enum signal_type st)
+{
+ switch (st) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return DRM_MODE_CONNECTOR_DisplayPort;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return DRM_MODE_CONNECTOR_DVID;
+ case SIGNAL_TYPE_VIRTUAL:
+ return DRM_MODE_CONNECTOR_VIRTUAL;
+
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ encoder = helper->best_encoder(connector);
+
+ if (encoder == NULL)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->native_mode.clock = 0;
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode = NULL;
+
+ list_for_each_entry(preferred_mode,
+ &connector->probed_modes,
+ head) {
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+ amdgpu_encoder->native_mode = *preferred_mode;
+
+ break;
+ }
+
+ }
+}
+
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+ char *name,
+ int hdisplay, int vdisplay)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+ mode = drm_mode_duplicate(dev, native_mode);
+
+ if (mode == NULL)
+ return NULL;
+
+ mode->hdisplay = hdisplay;
+ mode->vdisplay = vdisplay;
+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ return mode;
+
+}
+
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+ };
+
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
+ struct drm_display_mode *curmode = NULL;
+ bool mode_existed = false;
+
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
+
+ list_for_each_entry(curmode, &connector->probed_modes, head) {
+ if (common_modes[i].w == curmode->hdisplay &&
+ common_modes[i].h == curmode->vdisplay) {
+ mode_existed = true;
+ break;
+ }
+ }
+
+ if (mode_existed)
+ continue;
+
+ mode = amdgpu_dm_create_common_mode(encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ drm_mode_probed_add(connector, mode);
+ amdgpu_dm_connector->num_modes++;
+ }
+}
+
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_dm_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+ amdgpu_dm_get_native_mode(connector);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+}
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ const struct drm_connector_helper_funcs *helper =
+ connector->helper_private;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_dm_connector->edid;
+
+ encoder = helper->best_encoder(connector);
+
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ return amdgpu_dm_connector->num_modes;
+}
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = dm->ddev->dev_private;
+
+ aconnector->connector_id = link_index;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+
+ mutex_init(&aconnector->hpd_lock);
+
+	/* Configure HPD hot-plug support: connector->polled defaults to 0,
+	 * which means HPD hot plug is not supported.
+	 */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
+ }
+
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+
+}
+
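+/*
+ * i2c_algorithm master_xfer hook: repack the Linux i2c_msg array into a DC
+ * i2c_command with one payload per message and submit it through i2caux on
+ * the link's DDC pin. Returns the number of messages on success, -EIO
+ * otherwise.
+ */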
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+ struct ddc_service *ddc_service = i2c->ddc_service;
+ struct i2c_command cmd;
+ int i;
+ int result = -EIO;
+
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+ return result;
+
+ cmd.number_of_payloads = num;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = 100;
+
+ for (i = 0; i < num; i++) {
+ cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+ cmd.payloads[i].address = msgs[i].addr;
+ cmd.payloads[i].length = msgs[i].len;
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+ if (dal_i2caux_submit_i2c_command(
+ ddc_service->ctx->i2caux,
+ ddc_service->ddc_pin,
+ &cmd))
+ result = num;
+
+ kfree(cmd.payloads);
+ return result;
+}
+
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+ .master_xfer = amdgpu_dm_i2c_xfer,
+ .functionality = amdgpu_dm_i2c_func,
+};
+
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+ int link_index,
+ int *res)
+{
+ struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+ struct amdgpu_i2c_adapter *i2c;
+
+ i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+ i2c->base.owner = THIS_MODULE;
+ i2c->base.class = I2C_CLASS_DDC;
+ i2c->base.dev.parent = &adev->pdev->dev;
+ i2c->base.algo = &amdgpu_dm_i2c_algo;
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
+
+ return i2c;
+}
+
+/* Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ uint32_t link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
+
+ link->priv = aconnector;
+
+ DRM_DEBUG_DRIVER("%s()\n", __func__);
+
+ i2c = create_i2c(link->ddc, link->link_index, &res);
+ if (!i2c) {
+ DRM_ERROR("Failed to create i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
+
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
+ }
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_mode_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ drm_connector_register(&aconnector->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_dp_connector(dm, aconnector);
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+ /* NOTE: this currently will create backlight device even if a panel
+ * is not connected to the eDP/LVDS connector.
+ *
+ * This is less than ideal but we don't have sink information at this
+ * stage since detection happens after. We can't do detection earlier
+ * since MST detection needs connectors to be created first.
+ */
+ if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) {
+		/* Even if registration fails, we should continue with
+		 * DM initialization, because not having backlight control
+		 * is better than a black screen.
+		 */
+ amdgpu_dm_register_backlight_device(dm);
+
+ if (dm->backlight_dev)
+ dm->backlight_link = link;
+ }
+#endif
+
+out_free:
+ if (res) {
+ kfree(i2c);
+ aconnector->i2c = NULL;
+ }
+ return res;
+}
+
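+/*
+ * Return an encoder possible_crtcs mask with one bit set per CRTC, e.g. four
+ * CRTCs yield 0xf; any other count falls back to the full 0x3f mask.
+ */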
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
+
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
+
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+ return res;
+}
+
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
+{
+ /*
+	 * This is not a correct translation, but it works as long as the
+	 * VBLANK constant is the same as PFLIP.
+ */
+ int irq_type =
+ amdgpu_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ } else {
+
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+}
+
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
+{
+ if (dm_state->scaling != old_dm_state->scaling)
+ return true;
+ if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+ if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+ if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+ dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+ return true;
+ return false;
+}
+
+static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+{
+ /* this is the update mode case */
+ if (adev->dm.freesync_module)
+ mod_freesync_remove_stream(adev->dm.freesync_module, stream);
+
+ acrtc->otg_inst = -1;
+ acrtc->enabled = false;
+}
+
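+/*
+ * Translate the cursor plane state into a dc_cursor_position. The cursor is
+ * positioned within the whole surface, so the plane coordinates are offset by
+ * the primary plane's source offset, and negative coordinates are clamped to
+ * zero with the overhang folded into the hotspot.
+ */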
+static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct dc_cursor_position *position)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int x, y;
+ int xorigin = 0, yorigin = 0;
+
+ if (!crtc || !plane->state->fb) {
+ position->enable = false;
+ position->x = 0;
+ position->y = 0;
+ return 0;
+ }
+
+ if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
+ (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
+ DRM_ERROR("%s: bad cursor width or height %d x %d\n",
+ __func__,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+ return -EINVAL;
+ }
+
+ x = plane->state->crtc_x;
+ y = plane->state->crtc_y;
+	/* avivo cursors are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+ y += crtc->primary->state->src_y >> 16;
+ if (x < 0) {
+ xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+ x = 0;
+ }
+ if (y < 0) {
+ yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+ y = 0;
+ }
+ position->enable = true;
+ position->x = x;
+ position->y = y;
+ position->x_hotspot = xorigin;
+ position->y_hotspot = yorigin;
+
+ return 0;
+}
+
+static void handle_cursor_update(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state)
+{
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
+ struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
+ struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ uint64_t address = afb ? afb->address : 0;
+ struct dc_cursor_position position;
+ struct dc_cursor_attributes attributes;
+ int ret;
+
+ if (!plane->state->fb && !old_plane_state->fb)
+ return;
+
+	DRM_DEBUG_DRIVER("%s: crtc_id=%d with cursor size %dx%d\n",
+ __func__,
+ amdgpu_crtc->crtc_id,
+ plane->state->crtc_w,
+ plane->state->crtc_h);
+
+ ret = get_cursor_position(plane, crtc, &position);
+ if (ret)
+ return;
+
+ if (!position.enable) {
+ /* turn off cursor */
+ if (crtc_state && crtc_state->stream)
+ dc_stream_set_cursor_position(crtc_state->stream,
+ &position);
+ return;
+ }
+
+ amdgpu_crtc->cursor_width = plane->state->crtc_w;
+ amdgpu_crtc->cursor_height = plane->state->crtc_h;
+
+ attributes.address.high_part = upper_32_bits(address);
+ attributes.address.low_part = lower_32_bits(address);
+ attributes.width = plane->state->crtc_w;
+ attributes.height = plane->state->crtc_h;
+ attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
+ attributes.rotation_angle = 0;
+ attributes.attribute_flags.value = 0;
+
+ attributes.pitch = attributes.width;
+
+ if (crtc_state->stream) {
+ if (!dc_stream_set_cursor_attributes(crtc_state->stream,
+ &attributes))
+ DRM_ERROR("DC failed to set cursor attributes\n");
+
+ if (!dc_stream_set_cursor_position(crtc_state->stream,
+ &position))
+ DRM_ERROR("DC failed to set cursor position\n");
+ }
+}
+
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
+
+ acrtc->event = acrtc->base.state->event;
+
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
+
+ DRM_DEBUG_DRIVER("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
+}
+
+/*
+ * Executes the flip.
+ *
+ * Waits on all of the BO's fences and for the target vblank count.
+ */
+static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ uint32_t target,
+ struct dc_state *state)
+{
+ unsigned long flags;
+ uint32_t target_vblank;
+ int r, vpos, hpos;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ struct dc_flip_addrs addr = { {0} };
+ /* TODO eliminate or rename surface_update */
+ struct dc_surface_update surface_updates[1] = { {0} };
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+
+
+ /* Prepare wait for target vblank early - before the fence-waits */
+ target_vblank = target - drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id);
+
+	/* TODO: This might fail and hence is better not used; wait
+	 * explicitly on the fences instead. In general this should only be
+	 * called for a blocking commit, as per the framework helpers.
+	 */
+ r = amdgpu_bo_reserve(abo, true);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to reserve buffer before flip\n");
+ WARN_ON(1);
+ }
+
+ /* Wait for all fences on this FB */
+ WARN_ON(reservation_object_wait_timeout_rcu(abo->tbo.resv, true, false,
+ MAX_SCHEDULE_TIMEOUT) < 0);
+
+ amdgpu_bo_unreserve(abo);
+
+ /* Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc->enabled &&
+ (amdgpu_get_crtc_scanoutpos(adev->ddev, acrtc->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(target_vblank -
+ amdgpu_get_vblank_counter_kms(adev->ddev, acrtc->crtc_id)) > 0)) {
+ usleep_range(1000, 1100);
+ }
+
+ /* Flip */
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ /* update crtc fb */
+ crtc->primary->fb = fb;
+
+ WARN_ON(acrtc->pflip_status != AMDGPU_FLIP_NONE);
+ WARN_ON(!acrtc_state->stream);
+
+ addr.address.grph.addr.low_part = lower_32_bits(afb->address);
+ addr.address.grph.addr.high_part = upper_32_bits(afb->address);
+ addr.flip_immediate = async_flip;
+
+
+ if (acrtc->base.state->event)
+ prepare_flip_isr(acrtc);
+
+ surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
+ surface_updates->flip_addr = &addr;
+
+
+ dc_commit_updates_for_stream(adev->dm.dc,
+ surface_updates,
+ 1,
+ acrtc_state->stream,
+ NULL,
+ &surface_updates->surface,
+ state);
+
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
+ __func__,
+ addr.address.grph.addr.high_part,
+ addr.address.grph.addr.low_part);
+
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+ struct drm_crtc *pcrtc,
+ bool *wait_for_vblank)
+{
+ uint32_t i;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct dc_stream_state *dc_stream_attach;
+ struct dc_plane_state *plane_states_constructed[MAX_SURFACES];
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct drm_crtc_state *new_pcrtc_state =
+ drm_atomic_get_new_crtc_state(state, pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ int planes_count = 0;
+ unsigned long flags;
+
+ /* update planes when needed */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ bool pflip_needed;
+ struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ handle_cursor_update(plane, old_plane_state);
+ continue;
+ }
+
+ if (!fb || !crtc || pcrtc != crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->active)
+ continue;
+
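+ /* Without allow_modeset this commit is treated as a page flip and is
+ * handled by the flip path below instead of a full plane commit.
+ */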
+ pflip_needed = !state->allow_modeset;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (acrtc_attach->pflip_status != AMDGPU_FLIP_NONE) {
+ DRM_ERROR("%s: acrtc %d, already busy\n",
+ __func__,
+ acrtc_attach->crtc_id);
+ /* In commit tail framework this cannot happen */
+ WARN_ON(1);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ if (!pflip_needed) {
+ WARN_ON(!dm_new_plane_state->dc_state);
+
+ plane_states_constructed[planes_count] = dm_new_plane_state->dc_state;
+
+ dc_stream_attach = acrtc_state->stream;
+ planes_count++;
+
+ } else if (new_crtc_state->planes_changed) {
+ /* Assume that even ONE crtc with an immediate flip means the
+ * entire commit can't wait for VBLANK
+ * TODO Check if this is correct
+ */
+ *wait_for_vblank =
+ new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC ?
+ false : true;
+
+ /* TODO: Needs rework for multiplane flip */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ drm_crtc_vblank_get(crtc);
+
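+ /* wait_for_vblank is used as a 0/1 offset: target the next vblank
+ * only when the flip is not immediate.
+ */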
+ amdgpu_dm_do_flip(
+ crtc,
+ fb,
+ drm_crtc_vblank_count(crtc) + *wait_for_vblank,
+ dm_state->context);
+ }
+
+ }
+
+ if (planes_count) {
+ unsigned long flags;
+
+ if (new_pcrtc_state->event) {
+
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ prepare_flip_isr(acrtc_attach);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ if (false == dc_commit_planes_to_stream(dm->dc,
+ plane_states_constructed,
+ planes_count,
+ dc_stream_attach,
+ dm_state->context))
+ dm_error("%s: Failed to attach plane!\n", __func__);
+ } else {
+ /*TODO BUG Here should go disable planes on CRTC. */
+ }
+}
+
+
+static int amdgpu_dm_atomic_commit(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool nonblock)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct amdgpu_device *adev = dev->dev_private;
+ int i;
+
+ /*
+ * We disable vblank and pflip interrupts on the CRTCs that are
+ * about to change. We do it here to flush and disable them before
+ * drm_atomic_helper_swap_state() is called from drm_atomic_helper_commit(),
+ * since that swap updates crtc->dm_crtc_state->stream, a pointer used in
+ * the ISRs.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
+ manage_dm_interrupts(adev, acrtc, false);
+ }
+ /* Add check here for SoC's that support hardware cursor plane, to
+ * unset legacy_cursor_update */
+
+ return drm_atomic_helper_commit(dev, state, nonblock);
+
+ /*TODO Handle EINTR, reenable IRQ*/
+}
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ uint32_t i, j;
+ uint32_t new_crtcs_count = 0;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct amdgpu_crtc *new_crtcs[MAX_STREAMS];
+ struct dc_stream_state *new_stream = NULL;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+
+ dm_state = to_dm_atomic_state(state);
+
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* handles headless hotplug case, updating new_state and
+ * aconnector as needed
+ */
+
+ if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+ DRM_DEBUG_DRIVER("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+ if (!dm_new_crtc_state->stream) {
+ /*
+ * this could happen because of issues with
+ * userspace notification delivery.
+ * In this case userspace tries to set a mode on
+ * a display which is in fact disconnected.
+ * dc_sink is NULL in this case on aconnector.
+ * We expect a mode reset to come soon.
+ *
+ * This can also happen when an unplug is done
+ * during the resume sequence
+ *
+ * In this case, we want to pretend we still
+ * have a sink to keep the pipe running so that
+ * hw state is consistent with the sw state
+ */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
+
+
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+
+ /*
+ * this loop collects the CRTCs that had a mode set;
+ * we need to enable vblanks on them once all
+ * resources are acquired in dc after dc_commit_state
+ */
+
+ /*TODO move all this into dm_crtc_state, get rid of
+ * new_crtcs array and use old and new atomic states
+ * instead
+ */
+ new_crtcs[new_crtcs_count] = acrtc;
+ new_crtcs_count++;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+ }
+ } /* for_each_crtc_in_state() */
+
+ /*
+ * Add streams to the freesync module only after the streams
+ * they replace have been removed from it
+ */
+ if (adev->dm.freesync_module) {
+ for (i = 0; i < new_crtcs_count; i++) {
+ struct amdgpu_dm_connector *aconnector = NULL;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ &new_crtcs[i]->base);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ new_stream = dm_new_crtc_state->stream;
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(
+ state,
+ &new_crtcs[i]->base);
+ if (!aconnector) {
+ DRM_DEBUG_DRIVER("Atomic commit: Failed to find connector for acrtc id:%d "
+ "skipping freesync init\n",
+ new_crtcs[i]->crtc_id);
+ continue;
+ }
+
+ mod_freesync_add_stream(adev->dm.freesync_module,
+ new_stream, &aconnector->caps);
+ }
+ }
+
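+ /* Program the global DC state built during atomic check into hardware. */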
+ if (dm_state->context)
+ WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (!status)
+ DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
+ }
+
+ /* Handle scaling and underscan changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_stream_status *status = NULL;
+
+ if (acrtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ /* Skip anything that is not a scaling or underscan change */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+
+ if (!dm_new_crtc_state->stream)
+ continue;
+
+ status = dc_stream_get_status(dm_new_crtc_state->stream);
+ WARN_ON(!status);
+ WARN_ON(!status->plane_count);
+
+ /* TODO: How does this work with MPO? */
+ if (!dc_commit_planes_to_stream(
+ dm->dc,
+ status->plane_states,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ dm_state->context))
+ dm_error("%s: Failed to update stream scaling!\n", __func__);
+ }
+
+ for (i = 0; i < new_crtcs_count; i++) {
+ /*
+ * loop to enable interrupts on newly arrived crtc
+ */
+ struct amdgpu_crtc *acrtc = new_crtcs[i];
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (adev->dm.freesync_module)
+ mod_freesync_notify_mode_change(
+ adev->dm.freesync_module, &dm_new_crtc_state->stream, 1);
+
+ manage_dm_interrupts(adev, acrtc, true);
+ }
+
+ /* update planes when needed per crtc*/
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream)
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+ }
+
+
+ /*
+ * send vblank event on all events not handled in flip and
+ * mark consumed event for drm_atomic_helper_commit_hw_done
+ */
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+ if (new_crtc_state->event)
+ drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ /* Signal HW programming completion */
+ drm_atomic_helper_commit_hw_done(state);
+
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+ /* Construct an atomic state to restore previous display setting */
+
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto err;
+
+ /* Attach crtc to drm_atomic_state*/
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto err;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto err;
+
+
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+ if (!ret)
+ return 0;
+
+err:
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+
+/*
+ * This function handles all cases when a set mode does not come upon hotplug.
+ * This includes when the same display is unplugged and then plugged back into
+ * the same port, and when we are running without usermode desktop manager
+ * support
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
+
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ if (!disconnected_acrtc)
+ return;
+
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
+ return;
+
+ /*
+ * If the previous sink is not released and different from the current,
+ * we deduce we are in a state where we can not rely on usermode call
+ * to turn on the display, so we do it here
+ */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
+
+/*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * waits for completion of all non-blocking commits.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
+
+ /* Adding all modeset locks to acquire_ctx will
+ * ensure that when the framework releases it the
+ * extra locks we are locking here will get released too
+ */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
+
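+ /* For each CRTC, grab a reference to its most recent commit under
+ * commit_lock, then wait for hw_done and flip_done outside the lock.
+ */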
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+ /* Make sure all pending HW programming completed and
+ * page flips done
+ */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
+
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
+ "timed out\n", crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
+static int dm_update_crtcs_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct dc_stream_state *new_stream;
+ int ret = 0;
+
+ /*TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set */
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *new_con_state = NULL;
+ struct dm_connector_state *dm_conn_state = NULL;
+
+ new_stream = NULL;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
+
+ aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+
+ /* TODO This hack should go away */
+ if (aconnector && enable) {
+ // Make sure fake sink is created in plug-in scenario
+ new_con_state = drm_atomic_get_connector_state(state,
+ &aconnector->base);
+
+ if (IS_ERR(new_con_state)) {
+ ret = PTR_ERR_OR_ZERO(new_con_state);
+ break;
+ }
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ new_stream = create_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_conn_state);
+
+ /*
+ * we can have no stream on ACTION_SET if a display
+ * was disconnected during S3; in this case it is not an
+ * error, the OS will be updated after detection and
+ * will do the right thing on the next atomic commit
+ */
+
+ if (!new_stream) {
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ break;
+ }
+ }
+
+ if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+
+ new_crtc_state->mode_changed = false;
+
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
+
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto next_crtc;
+
+ DRM_DEBUG_DRIVER(
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
+ "planes_changed:%d, mode_changed:%d,active_changed:%d,"
+ "connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Remove stream for any changed/disabled CRTC */
+ if (!enable) {
+
+ if (!dm_old_crtc_state->stream)
+ goto next_crtc;
+
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ /* i.e. reset mode */
+ if (dc_remove_stream_from_ctx(
+ dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else {/* Add stream for any updated/enabled CRTC */
+ /*
+ * Quick fix to prevent a NULL pointer dereference on new_stream when
+ * newly added MST connectors are not found in the existing crtc_state
+ * in chained (daisy-chain) mode.
+ * TODO: need to dig out the root cause of this
+ */
+ if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+ goto next_crtc;
+
+ if (modereset_required(new_crtc_state))
+ goto next_crtc;
+
+ if (modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
+
+ WARN_ON(dm_new_crtc_state->stream);
+
+ dm_new_crtc_state->stream = new_stream;
+ dc_stream_retain(new_stream);
+
+ DRM_DEBUG_DRIVER("Enabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ if (dc_add_stream_to_ctx(
+ dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+next_crtc:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
+ }
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
+}
+
+static int dm_update_planes_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ int i;
+ /* TODO return page_flip_needed() function */
+ bool pflip_needed = !state->allow_modeset;
+ int ret = 0;
+
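+ /* For page-flip-only commits the planes are handled by the flip path,
+ * so there is nothing to add to or remove from the DC context here.
+ */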
+ if (pflip_needed)
+ return ret;
+
+ /* Add new planes */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ new_plane_crtc = new_plane_state->crtc;
+ old_plane_crtc = old_plane_state->crtc;
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
+
+ /*TODO Implement atomic check for cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ /* Remove any changed/removed planes */
+ if (!enable) {
+
+ if (!old_plane_crtc)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!dm_old_crtc_state->stream)
+ continue;
+
+ DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
+
+ if (!dc_remove_plane_from_context(
+ dc,
+ dm_old_crtc_state->stream,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+
+ ret = -EINVAL;
+ return ret;
+ }
+
+
+ dc_plane_state_release(dm_old_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else { /* Add new planes */
+
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ continue;
+
+ if (!new_plane_crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (!dm_new_crtc_state->stream)
+ continue;
+
+
+ WARN_ON(dm_new_plane_state->dc_state);
+
+ dm_new_plane_state->dc_state = dc_create_plane_state(dc);
+
+ DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
+
+ if (!dm_new_plane_state->dc_state) {
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = fill_plane_attributes(
+ new_plane_crtc->dev->dev_private,
+ dm_new_plane_state->dc_state,
+ new_plane_state,
+ new_crtc_state,
+ false);
+ if (ret)
+ return ret;
+
+
+ if (!dc_add_plane_to_context(
+ dc,
+ dm_new_crtc_state->stream,
+ dm_new_plane_state->dc_state,
+ dm_state->context)) {
+
+ ret = -EINVAL;
+ return ret;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+
+ return ret;
+}
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ int i;
+ int ret;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct dc *dc = adev->dm.dc;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+
+ /*
+ * This bool will be set to true for any modeset/reset
+ * or plane update which implies a non-fast surface update.
+ */
+ bool lock_and_validation_needed = false;
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret)
+ goto fail;
+
+ /*
+ * legacy_cursor_update should be made false in amdgpu_dm_atomic_commit()
+ * for SoCs that have a dedicated hardware cursor plane;
+ * otherwise, for a software cursor plane,
+ * we should not add it to the list of affected planes.
+ */
+ if (state->legacy_cursor_update) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->color_mgmt_changed) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+ } else {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed)
+ continue;
+
+ if (!new_crtc_state->enable)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ goto fail;
+ }
+ }
+
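+ /* Build the proposed DC state from a copy of the current one; the
+ * remove/add passes below then apply this commit's changes to it.
+ */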
+ dm_state->context = dc_create_state();
+ ASSERT(dm_state->context);
+ dc_resource_state_copy_construct_current(dc, dm_state->context);
+
+ /* Remove existing planes if they are modified */
+ ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
+ if (ret) {
+ goto fail;
+ }
+
+ /* Disable all crtcs which require disable */
+ ret = dm_update_crtcs_state(dc, state, false, &lock_and_validation_needed);
+ if (ret) {
+ goto fail;
+ }
+
+ /* Enable all crtcs which require enable */
+ ret = dm_update_crtcs_state(dc, state, true, &lock_and_validation_needed);
+ if (ret) {
+ goto fail;
+ }
+
+ /* Add new/modified planes */
+ ret = dm_update_planes_state(dc, state, true, &lock_and_validation_needed);
+ if (ret) {
+ goto fail;
+ }
+
+ /* Run this here since we want to validate the streams we created */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret)
+ goto fail;
+
+ /* Check scaling and underscan changes */
+ /* TODO: Removed scaling changes validation due to inability to commit
+ * a new stream into the context without causing a full reset. Need to
+ * decide how to handle this.
+ */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(
+ drm_atomic_get_new_crtc_state(state, &acrtc->base)))
+ continue;
+
+ /* Skip anything that is not a scaling or underscan change */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ lock_and_validation_needed = true;
+ }
+
+ /*
+ * For the full update case, when
+ * removing/adding/updating streams on one CRTC while flipping
+ * on another CRTC,
+ * acquiring the global lock will guarantee that any such full
+ * update commit
+ * will wait for completion of any outstanding flip using DRM's
+ * synchronization events.
+ */
+
+ if (lock_and_validation_needed) {
+
+ ret = do_aquire_global_lock(dev, state);
+ if (ret)
+ goto fail;
+
+ if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ /* Must be success */
+ WARN_ON(ret);
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ else
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+
+ return ret;
+}
+
+static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+ uint8_t dpcd_data;
+ bool capable = false;
+
+ if (amdgpu_dm_connector->dc_link &&
+ dm_helpers_dp_read_dpcd(
+ NULL,
+ amdgpu_dm_connector->dc_link,
+ DP_DOWN_STREAM_PORT_COUNT,
+ &dpcd_data,
+ sizeof(dpcd_data))) {
+ capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
+ }
+
+ return capable;
+}
+void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int i;
+ uint64_t val_capable;
+ bool edid_check_required;
+ struct detailed_timing *timing;
+ struct detailed_non_pixel *data;
+ struct detailed_data_monitor_range *range;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ edid_check_required = false;
+ if (!amdgpu_dm_connector->dc_sink) {
+ DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
+ return;
+ }
+ if (!adev->dm.freesync_module)
+ return;
+ /*
+ * if the edid is non-NULL, restrict freesync to DP and eDP only
+ */
+ if (edid) {
+ if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+ || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ edid_check_required = is_dp_capable_without_timing_msa(
+ adev->dm.dc,
+ amdgpu_dm_connector);
+ }
+ }
+ val_capable = 0;
+ if (edid_check_required == true && (edid->version > 1 ||
+ (edid->version == 1 && edid->revision > 1))) {
+ for (i = 0; i < 4; i++) {
+
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+ range = &data->data.range;
+ /*
+ * Check if monitor has continuous frequency mode
+ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+ /*
+ * Check for flag range limits only. If flag == 1 then
+ * no additional timing information provided.
+ * Default GTF, GTF Secondary curve and CVT are not
+ * supported
+ */
+ if (range->flags != 1)
+ continue;
+
+ amdgpu_dm_connector->min_vfreq = range->min_vfreq;
+ amdgpu_dm_connector->max_vfreq = range->max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+ break;
+ }
+
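+ /* Only report freesync capability when the advertised refresh range
+ * is usable (wider than 10 Hz).
+ */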
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10) {
+ amdgpu_dm_connector->caps.supported = true;
+ amdgpu_dm_connector->caps.min_refresh_in_micro_hz =
+ amdgpu_dm_connector->min_vfreq * 1000000;
+ amdgpu_dm_connector->caps.max_refresh_in_micro_hz =
+ amdgpu_dm_connector->max_vfreq * 1000000;
+ val_capable = 1;
+ }
+ }
+
+ /*
+ * TODO figure out how to notify user-mode or DRM of freesync caps
+ * once we figure out how to deal with freesync in an upstreamable
+ * fashion
+ */
+
+}
+
+void amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector)
+{
+ /*
+ * TODO fill in once we figure out how to deal with freesync in
+ * an upstreamable fashion
+ */
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
new file mode 100644
index 000000000000..117521c6a6ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_H__
+#define __AMDGPU_DM_H__
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include "dc.h"
+
+/*
+ * This file contains the definition for amdgpu_display_manager
+ * and its API for amdgpu driver's use.
+ * This component provides all the display-related functionality
+ * and is the only component that calls the DAL API.
+ * The API contained here is intended for amdgpu driver use.
+ * The API that is called directly from the KMS framework is located
+ * in the amdgpu_dm_kms.h file.
+ */
+
+#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
+/*
+#include "include/amdgpu_dal_power_if.h"
+#include "amdgpu_dm_irq.h"
+*/
+
+#include "irq_types.h"
+#include "signal_types.h"
+
+/* Forward declarations */
+struct amdgpu_device;
+struct drm_device;
+struct amdgpu_dm_irq_handler_data;
+
+struct amdgpu_dm_prev_state {
+ struct drm_framebuffer *fb;
+ int32_t x;
+ int32_t y;
+ struct drm_display_mode mode;
+};
+
+struct common_irq_params {
+ struct amdgpu_device *adev;
+ enum dc_irq_source irq_src;
+};
+
+struct irq_list_head {
+ struct list_head head;
+ /* In case this interrupt needs post-processing, 'work' will be queued*/
+ struct work_struct work;
+};
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+struct dm_comressor_info {
+ void *cpu_addr;
+ struct amdgpu_bo *bo_ptr;
+ uint64_t gpu_addr;
+};
+#endif
+
+
+struct amdgpu_display_manager {
+ struct dal *dal;
+ struct dc *dc;
+ struct cgs_device *cgs_device;
+ /* lock to be used when DAL is called from SYNC IRQ context */
+ spinlock_t dal_lock;
+
+ struct amdgpu_device *adev; /*AMD base driver*/
+ struct drm_device *ddev; /*DRM base driver*/
+ u16 display_indexes_num;
+
+ struct amdgpu_dm_prev_state prev_state;
+
+ /*
+ * 'irq_source_handler_table' holds a list of handlers
+ * per (DAL) IRQ source.
+ *
+ * Each IRQ source may need to be handled at different contexts.
+ * By 'context' we mean, for example:
+ * - The ISR context, which is the direct interrupt handler.
+ * - The 'deferred' context - this is the post-processing of the
+ * interrupt, but at a lower priority.
+ *
+ * Note that handlers are called in the same order as they were
+ * registered (FIFO).
+ */
+ struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+ struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
+
+ struct common_irq_params
+ pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
+
+ struct common_irq_params
+ vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
+
+ /* this spin lock synchronizes access to 'irq_handler_list_table' */
+ spinlock_t irq_handler_list_table_lock;
+
+ /* Timer-related data. */
+ struct list_head timer_handler_list;
+ struct workqueue_struct *timer_workqueue;
+
+ /* Use dal_mutex for any activity which is NOT synchronized by
+ * DRM mode setting locks.
+ * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
+ * DRM mode setting locks being acquired. This is where dal_mutex
+ * is acquired before calling into DAL. */
+ struct mutex dal_mutex;
+
+ struct backlight_device *backlight_dev;
+
+ const struct dc_link *backlight_link;
+
+ struct work_struct mst_hotplug_work;
+
+ struct mod_freesync *freesync_module;
+
+ /**
+ * Caches device atomic state for suspend/resume
+ */
+ struct drm_atomic_state *cached_state;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ struct dm_comressor_info compressor;
+#endif
+};
+
+struct amdgpu_dm_connector {
+
+ struct drm_connector base;
+ uint32_t connector_id;
+
+ /* we need to mind the EDID between detect
+ and get modes due to analog/digital/tvencoder */
+ struct edid *edid;
+
+ /* shared with amdgpu */
+ struct amdgpu_hpd hpd;
+
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ struct dc_sink *dc_sink;
+ struct dc_link *dc_link;
+ struct dc_sink *dc_em_sink;
+
+ /* DM only */
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *port;
+ struct amdgpu_dm_connector *mst_port;
+ struct amdgpu_encoder *mst_encoder;
+
+ /* TODO see if we can merge with ddc_bus or make a dm_connector */
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Monitor range limits */
+ int min_vfreq;
+ int max_vfreq;
+ int pixel_clock_mhz;
+
+ /*freesync caps*/
+ struct mod_freesync_caps caps;
+
+ struct mutex hpd_lock;
+
+ bool fake_enable;
+};
+
+#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
+
+extern const struct amdgpu_ip_block_version dm_ip_block;
+
+struct amdgpu_framebuffer;
+struct amdgpu_display_manager;
+struct dc_validation_set;
+struct dc_plane_state;
+
+struct dm_plane_state {
+ struct drm_plane_state base;
+ struct dc_plane_state *dc_state;
+};
+
+struct dm_crtc_state {
+ struct drm_crtc_state base;
+ struct dc_stream_state *stream;
+};
+
+#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
+
+struct dm_atomic_state {
+ struct drm_atomic_state base;
+
+ struct dc_state *context;
+};
+
+#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
+
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index);
+
+int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector);
+
+void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector,
+ struct edid *edid);
+
+void
+amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector);
+
+extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
+
+#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
new file mode 100644
index 000000000000..9bd142f65f9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+#include <linux/version.h>
+#include <linux/i2c.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_edid.h>
+
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "dc.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+#include "dm_helpers.h"
+
+/* dm_helpers_parse_edid_caps
+ *
+ * Parse edid caps
+ *
+ * @edid: [in] pointer to edid
+ * @edid_caps: [in] pointer to edid caps
+ * @return
+ * enum dc_edid_status
+ */
+enum dc_edid_status dm_helpers_parse_edid_caps(
+ struct dc_context *ctx,
+ const struct dc_edid *edid,
+ struct dc_edid_caps *edid_caps)
+{
+ struct edid *edid_buf = (struct edid *) edid->raw_edid;
+ struct cea_sad *sads;
+ int sad_count = -1;
+ int sadb_count = -1;
+ int i = 0;
+ int j = 0;
+ uint8_t *sadb = NULL;
+
+ enum dc_edid_status result = EDID_OK;
+
+ if (!edid_caps || !edid)
+ return EDID_BAD_INPUT;
+
+ if (!drm_edid_is_valid(edid_buf))
+ result = EDID_BAD_CHECKSUM;
+
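+ /* Assemble the two-byte manufacturer and product IDs into 16-bit values. */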
+ edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
+ ((uint16_t) edid_buf->mfg_id[1])<<8;
+ edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
+ ((uint16_t) edid_buf->prod_code[1])<<8;
+ edid_caps->serial_number = edid_buf->serial;
+ edid_caps->manufacture_week = edid_buf->mfg_week;
+ edid_caps->manufacture_year = edid_buf->mfg_year;
+
+ /* One of the four detailed_timings stores the monitor name. It's
+ * stored in an array of length 13. */
+ for (i = 0; i < 4; i++) {
+ if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
+ while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
+ if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
+ break;
+
+ edid_caps->display_name[j] =
+ edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
+ j++;
+ }
+ }
+ }
+
+ edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
+ (struct edid *) edid->raw_edid);
+
+ sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
+ if (sad_count <= 0) {
+ DRM_INFO("SADs count is: %d, don't need to read it\n",
+ sad_count);
+ return result;
+ }
+
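+ /* Clamp the number of reported audio modes to what DC can store. */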
+ edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
+ for (i = 0; i < edid_caps->audio_mode_count; ++i) {
+ struct cea_sad *sad = &sads[i];
+
+ edid_caps->audio_modes[i].format_code = sad->format;
+ edid_caps->audio_modes[i].channel_count = sad->channels;
+ edid_caps->audio_modes[i].sample_rate = sad->freq;
+ edid_caps->audio_modes[i].sample_size = sad->byte2;
+ }
+
+ sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
+
+ if (sadb_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
+ sadb_count = 0;
+ }
+
+ if (sadb_count)
+ edid_caps->speaker_flags = sadb[0];
+ else
+ edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+
+ kfree(sads);
+ kfree(sadb);
+
+ return result;
+}
+
+static void get_payload_table(
+ struct amdgpu_dm_connector *aconnector,
+ struct dp_mst_stream_allocation_table *proposed_table)
+{
+ int i;
+ struct drm_dp_mst_topology_mgr *mst_mgr =
+ &aconnector->mst_port->mst_mgr;
+
+ mutex_lock(&mst_mgr->payload_lock);
+
+ proposed_table->stream_count = 0;
+
+ /* number of active streams */
+ for (i = 0; i < mst_mgr->max_payloads; i++) {
+ if (mst_mgr->payloads[i].num_slots == 0)
+ break; /* end of vcp_id table */
+
+ ASSERT(mst_mgr->payloads[i].payload_state !=
+ DP_PAYLOAD_DELETE_LOCAL);
+
+ if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
+ mst_mgr->payloads[i].payload_state ==
+ DP_PAYLOAD_REMOTE) {
+
+ struct dp_mst_stream_allocation *sa =
+ &proposed_table->stream_allocations[
+ proposed_table->stream_count];
+
+ sa->slot_count = mst_mgr->payloads[i].num_slots;
+ sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
+ proposed_table->stream_count++;
+ }
+ }
+
+ mutex_unlock(&mst_mgr->payload_lock);
+}
+
+/*
+ * Writes payload allocation table in immediate downstream device.
+ */
+bool dm_helpers_dp_mst_write_payload_allocation_table(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ struct dp_mst_stream_allocation_table *proposed_table,
+ bool enable)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ int slots = 0;
+ bool ret;
+ int clock;
+ int bpp = 0;
+ int pbn = 0;
+
+ aconnector = stream->sink->priv;
+
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!mst_mgr->mst_state)
+ return false;
+
+ mst_port = aconnector->port;
+
+ if (enable) {
+ clock = stream->timing.pix_clk_khz;
+
+ switch (stream->timing.display_color_depth) {
+
+ case COLOR_DEPTH_666:
+ bpp = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bpp = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bpp = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bpp = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bpp = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bpp = 16;
+ break;
+ default:
+ ASSERT(bpp != 0);
+ break;
+ }
+
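+ /* bpp above is bits per component; a pixel has three components. */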
+ bpp = bpp * 3;
+
+ /* TODO need to know link rate */
+
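+ /* Convert pixel clock and bpp into PBN, then find and allocate the
+ * VC payload slots for this stream.
+ */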
+ pbn = drm_dp_calc_pbn_mode(clock, bpp);
+
+ slots = drm_dp_find_vcpi_slots(mst_mgr, pbn);
+ ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, pbn, slots);
+
+ if (!ret)
+ return false;
+
+ } else {
+ drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
+ }
+
+ ret = drm_dp_update_payload_part1(mst_mgr);
+
+ /* mst_mgr->payloads holds the VC payloads that notify the MST branch
+ * using DPCD or AUX messages. Slots 1-63 are allocated in sequence for
+ * each stream, and the AMD ASIC stream slot allocation must follow the
+ * same sequence. Copy the DRM MST allocation to dc. */
+
+ get_payload_table(aconnector, proposed_table);
+
+ if (ret)
+ return false;
+
+ return true;
+}
+
+/*
+ * Polls for ACT (allocation change trigger) handled and sends
+ * ALLOCATE_PAYLOAD message.
+ */
+bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ int ret;
+
+ aconnector = stream->sink->priv;
+
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!mst_mgr->mst_state)
+ return false;
+
+ ret = drm_dp_check_act_status(mst_mgr);
+
+ if (ret)
+ return false;
+
+ return true;
+}
+
+bool dm_helpers_dp_mst_send_payload_allocation(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ bool enable)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ int ret;
+
+ aconnector = stream->sink->priv;
+
+ if (!aconnector || !aconnector->mst_port)
+ return false;
+
+ mst_port = aconnector->port;
+
+ mst_mgr = &aconnector->mst_port->mst_mgr;
+
+ if (!mst_mgr->mst_state)
+ return false;
+
+ ret = drm_dp_update_payload_part2(mst_mgr);
+
+ if (ret)
+ return false;
+
+ if (!enable)
+ drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
+
+ return true;
+}
+
+bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event)
+{
+ return true;
+}
+
+void dm_dtn_log_begin(struct dc_context *ctx)
+{}
+
+void dm_dtn_log_append_v(struct dc_context *ctx,
+ const char *pMsg, ...)
+{}
+
+void dm_dtn_log_end(struct dc_context *ctx)
+{}
+
+bool dm_helpers_dp_mst_start_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ bool boot)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ if (boot) {
+ DRM_INFO("DM_MST: Differing MST start on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+ return true;
+ }
+
+ DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
+}
+
+void dm_helpers_dp_mst_stop_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return;
+ }
+
+ DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
+ aconnector, aconnector->base.base.id);
+
+ if (aconnector->mst_mgr.mst_state == true)
+ drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
+}
+
+bool dm_helpers_dp_read_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size)
+{
+
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
+ data, size) > 0;
+}
+
+bool dm_helpers_dp_write_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
+ address, (uint8_t *)data, size) > 0;
+}
+
+bool dm_helpers_submit_i2c(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ struct i2c_command *cmd)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ struct i2c_msg *msgs;
+ int i = 0;
+ int num = cmd->number_of_payloads;
+ bool result;
+
+ if (!aconnector) {
+ DRM_ERROR("Failed to found connector for link!");
+ return false;
+ }
+
+ msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
+
+ if (!msgs)
+ return false;
+
+ for (i = 0; i < num; i++) {
+ msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
+ msgs[i].addr = cmd->payloads[i].address;
+ msgs[i].len = cmd->payloads[i].length;
+ msgs[i].buf = cmd->payloads[i].data;
+ }
+
+ result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
+
+ kfree(msgs);
+
+ return result;
+}
+
+enum dc_edid_status dm_helpers_read_local_edid(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ struct dc_sink *sink)
+{
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ struct i2c_adapter *ddc;
+ int retry = 3;
+ enum dc_edid_status edid_status;
+ struct edid *edid;
+
+ if (link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ /* some dongles read the edid incorrectly the first time;
+ * verify the checksum and retry to make sure we read a correct edid.
+ */
+ do {
+
+ edid = drm_get_edid(&aconnector->base, ddc);
+
+ if (!edid)
+ return EDID_NO_RESPONSE;
+
+ sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
+ memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
+
+ /* We don't need the original edid anymore */
+ kfree(edid);
+
+ edid_status = dm_helpers_parse_edid_caps(
+ ctx,
+ &sink->dc_edid,
+ &sink->edid_caps);
+
+ } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
+
+ if (edid_status != EDID_OK)
+ DRM_ERROR("EDID err: %d, on connector: %s",
+ edid_status,
+ aconnector->base.name);
+
+ return edid_status;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
new file mode 100644
index 000000000000..ca5d0d1581dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include "dm_services_types.h"
+#include "dc.h"
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+/******************************************************************************
+ * Private declarations.
+ *****************************************************************************/
+
+struct handler_common_data {
+ struct list_head list;
+ interrupt_handler handler;
+ void *handler_arg;
+
+ /* DM which this handler belongs to */
+ struct amdgpu_display_manager *dm;
+};
+
+struct amdgpu_dm_irq_handler_data {
+ struct handler_common_data hcd;
+ /* DAL irq source which registered for this interrupt. */
+ enum dc_irq_source irq_source;
+};
+
+struct amdgpu_dm_timer_handler_data {
+ struct handler_common_data hcd;
+ struct delayed_work d_work;
+};
+
+#define DM_IRQ_TABLE_LOCK(adev, flags) \
+ spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
+
+#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
+ spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
+
+/******************************************************************************
+ * Private functions.
+ *****************************************************************************/
+
+static void init_handler_common_data(struct handler_common_data *hcd,
+ void (*ih)(void *),
+ void *args,
+ struct amdgpu_display_manager *dm)
+{
+ hcd->handler = ih;
+ hcd->handler_arg = args;
+ hcd->dm = dm;
+}
+
+/**
+ * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ *
+ * @work: work struct
+ */
+static void dm_irq_work_func(struct work_struct *work)
+{
+ struct list_head *entry;
+ struct irq_list_head *irq_list_head =
+ container_of(work, struct irq_list_head, work);
+ struct list_head *handler_list = &irq_list_head->head;
+ struct amdgpu_dm_irq_handler_data *handler_data;
+
+ list_for_each(entry, handler_list) {
+ handler_data =
+ list_entry(
+ entry,
+ struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+ handler_data->irq_source);
+
+ DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
+ handler_data->irq_source);
+
+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
+ }
+
+ /* Call a DAL subcomponent which registered for interrupt notification
+ * at INTERRUPT_LOW_IRQ_CONTEXT.
+ * (The most common use is HPD interrupt) */
+}
+
+/**
+ * Remove a handler and return a pointer to the handler list from which the
+ * handler was removed.
+ */
+static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
+ void *ih,
+ const struct dc_interrupt_params *int_params)
+{
+ struct list_head *hnd_list;
+ struct list_head *entry, *tmp;
+ struct amdgpu_dm_irq_handler_data *handler;
+ unsigned long irq_table_flags;
+ bool handler_removed = false;
+ enum dc_irq_source irq_source;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ irq_source = int_params->irq_source;
+
+ switch (int_params->int_context) {
+ case INTERRUPT_HIGH_IRQ_CONTEXT:
+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+ break;
+ case INTERRUPT_LOW_IRQ_CONTEXT:
+ default:
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+ break;
+ }
+
+ list_for_each_safe(entry, tmp, hnd_list) {
+
+ handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ if (ih == handler) {
+ /* Found our handler. Remove it from the list. */
+ list_del(&handler->hcd.list);
+ handler_removed = true;
+ break;
+ }
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (handler_removed == false) {
+ /* Not necessarily an error - caller may not
+ * know the context. */
+ return NULL;
+ }
+
+ kfree(handler);
+
+ DRM_DEBUG_KMS(
+ "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
+ ih, int_params->irq_source, int_params->int_context);
+
+ return hnd_list;
+}
+
+/* If 'handler_in == NULL' then remove ALL handlers. */
+static void remove_timer_handler(struct amdgpu_device *adev,
+ struct amdgpu_dm_timer_handler_data *handler_in)
+{
+ struct amdgpu_dm_timer_handler_data *handler_temp;
+ struct list_head *handler_list;
+ struct list_head *entry, *tmp;
+ unsigned long irq_table_flags;
+ bool handler_removed = false;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ handler_list = &adev->dm.timer_handler_list;
+
+ list_for_each_safe(entry, tmp, handler_list) {
+ /* Note that list_for_each_safe() guarantees that
+ * handler_temp is NOT null. */
+ handler_temp = list_entry(entry,
+ struct amdgpu_dm_timer_handler_data, hcd.list);
+
+ if (handler_in == NULL || handler_in == handler_temp) {
+ list_del(&handler_temp->hcd.list);
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
+ handler_temp);
+
+ if (handler_in == NULL) {
+ /* Since it is still in the queue, it must
+ * be cancelled. */
+ cancel_delayed_work_sync(&handler_temp->d_work);
+ }
+
+ kfree(handler_temp);
+ handler_removed = true;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ }
+
+ /* Remove ALL handlers. */
+ if (handler_in == NULL)
+ continue;
+
+ /* Remove a SPECIFIC handler.
+ * Found our handler - we can stop here. */
+ if (handler_in == handler_temp)
+ break;
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (handler_in != NULL && !handler_removed)
+ DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
+ handler_in);
+}
+
+static bool
+validate_irq_registration_params(struct dc_interrupt_params *int_params,
+ void (*ih)(void *))
+{
+ if (NULL == int_params || NULL == ih) {
+ DRM_ERROR("DM_IRQ: invalid input!\n");
+ return false;
+ }
+
+ if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
+ DRM_ERROR("DM_IRQ: invalid context: %d!\n",
+ int_params->int_context);
+ return false;
+ }
+
+ if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
+ DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
+ int_params->irq_source);
+ return false;
+ }
+
+ return true;
+}
+
+static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
+ irq_handler_idx handler_idx)
+{
+ if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
+ DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
+ return false;
+ }
+
+ if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
+ DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
+ return false;
+ }
+
+ return true;
+}
+/******************************************************************************
+ * Public functions.
+ *
+ * Note: caller is responsible for input validation.
+ *****************************************************************************/
+
+void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+ struct dc_interrupt_params *int_params,
+ void (*ih)(void *),
+ void *handler_args)
+{
+ struct list_head *hnd_list;
+ struct amdgpu_dm_irq_handler_data *handler_data;
+ unsigned long irq_table_flags;
+ enum dc_irq_source irq_source;
+
+ if (!validate_irq_registration_params(int_params, ih))
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+
+ handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+ if (!handler_data) {
+ DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+ }
+
+ init_handler_common_data(&handler_data->hcd, ih, handler_args,
+ &adev->dm);
+
+ irq_source = int_params->irq_source;
+
+ handler_data->irq_source = irq_source;
+
+ /* Lock the list, add the handler. */
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ switch (int_params->int_context) {
+ case INTERRUPT_HIGH_IRQ_CONTEXT:
+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+ break;
+ case INTERRUPT_LOW_IRQ_CONTEXT:
+ default:
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+ break;
+ }
+
+ list_add_tail(&handler_data->hcd.list, hnd_list);
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ /* This pointer will be stored by code which requested interrupt
+ * registration.
+ * The same pointer will be needed in order to unregister the
+ * interrupt. */
+
+ DRM_DEBUG_KMS(
+ "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
+ handler_data,
+ irq_source,
+ int_params->int_context);
+
+ return handler_data;
+}
+
+void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source,
+ void *ih)
+{
+ struct list_head *handler_list;
+ struct dc_interrupt_params int_params;
+ int i;
+
+ if (!validate_irq_unregistration_params(irq_source, ih))
+ return;
+
+ memset(&int_params, 0, sizeof(int_params));
+
+ int_params.irq_source = irq_source;
+
+ for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
+
+ int_params.int_context = i;
+
+ handler_list = remove_irq_handler(adev, ih, &int_params);
+
+ if (handler_list != NULL)
+ break;
+ }
+
+ if (handler_list == NULL) {
+ /* If we got here, it means we searched all irq contexts
+ * for this irq source, but the handler was not found. */
+ DRM_ERROR(
+ "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
+ ih, irq_source);
+ }
+}
+
+int amdgpu_dm_irq_init(struct amdgpu_device *adev)
+{
+ int src;
+ struct irq_list_head *lh;
+
+ DRM_DEBUG_KMS("DM_IRQ\n");
+
+ spin_lock_init(&adev->dm.irq_handler_list_table_lock);
+
+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+ /* low context handler list init */
+ lh = &adev->dm.irq_handler_list_low_tab[src];
+ INIT_LIST_HEAD(&lh->head);
+ INIT_WORK(&lh->work, dm_irq_work_func);
+
+ /* high context handler init */
+ INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
+ }
+
+ INIT_LIST_HEAD(&adev->dm.timer_handler_list);
+
+ /* allocate and initialize the workqueue for DM timer */
+ adev->dm.timer_workqueue = create_singlethread_workqueue(
+ "dm_timer_queue");
+ if (adev->dm.timer_workqueue == NULL) {
+ DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* DM IRQ and timer resource release */
+void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
+{
+ int src;
+ struct irq_list_head *lh;
+ DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+
+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+
+ /* The handler was removed from the table,
+ * it means it is safe to flush all the 'work'
+ * (because no code can schedule a new one). */
+ lh = &adev->dm.irq_handler_list_low_tab[src];
+ flush_work(&lh->work);
+ }
+
+ /* Cancel ALL timers and release handlers (if any). */
+ remove_timer_handler(adev, NULL);
+ /* Release the queue itself. */
+ destroy_workqueue(adev->dm.timer_workqueue);
+}
+
+int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
+{
+ int src;
+ struct list_head *hnd_list_h;
+ struct list_head *hnd_list_l;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: suspend\n");
+
+ /* Disable HW interrupts only for HPD and HPDRX, since the FLIP and
+ * VBLANK interrupts are disabled by manage_dm_interrupts() when the
+ * CRTC is disabled.
+ */
+ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ dc_interrupt_set(adev->dm.dc, src, false);
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ return 0;
+}
+
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+{
+ int src;
+ struct list_head *hnd_list_h, *hnd_list_l;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+
+ /* Re-enable the HPD RX (short-pulse) HW interrupts. */
+ for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ dc_interrupt_set(adev->dm.dc, src, true);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ return 0;
+}
+
+int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+{
+ int src;
+ struct list_head *hnd_list_h, *hnd_list_l;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: resume\n");
+
+ /* Re-enable HW interrupts only for HPD, since the FLIP and VBLANK
+ * interrupts are enabled by manage_dm_interrupts() when the CRTC is
+ * enabled.
+ */
+ for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
+ hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+ hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+ if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+ dc_interrupt_set(adev->dm.dc, src, true);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+ return 0;
+}
+
+/**
+ * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
+ * "irq_source".
+ */
+static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source)
+{
+ unsigned long irq_table_flags;
+ struct work_struct *work = NULL;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
+ work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (work) {
+ if (!schedule_work(work))
+ DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
+ irq_source);
+ }
+
+}
+
+/*
+ * amdgpu_dm_irq_immediate_work - call the "high irq" handlers registered for
+ * "irq_source" immediately, without deferring them to the work queue.
+ */
+static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source)
+{
+ struct amdgpu_dm_irq_handler_data *handler_data;
+ struct list_head *entry;
+ unsigned long irq_table_flags;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ list_for_each(
+ entry,
+ &adev->dm.irq_handler_list_high_tab[irq_source]) {
+
+ handler_data =
+ list_entry(
+ entry,
+ struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ /* Call a subcomponent which registered for immediate
+ * interrupt notification */
+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
+/*
+ * amdgpu_dm_irq_handler
+ *
+ * Generic IRQ handler: calls all registered "high irq" handlers immediately
+ * and schedules deferred work for the "low irq" handlers.
+ */
+static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+
+ enum dc_irq_source src =
+ dc_interrupt_to_irq_source(
+ adev->dm.dc,
+ entry->src_id,
+ entry->src_data[0]);
+
+ dc_interrupt_ack(adev->dm.dc, src);
+
+ /* Call high irq work immediately */
+ amdgpu_dm_irq_immediate_work(adev, src);
+ /*Schedule low_irq work */
+ amdgpu_dm_irq_schedule_work(adev, src);
+
+ return 0;
+}
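+/*
+ * Summary of the dispatch flow implemented above:
+ *
+ *   hardware interrupt
+ *     -> amdgpu_dm_irq_handler()
+ *          -> dc_interrupt_ack()
+ *          -> amdgpu_dm_irq_immediate_work():  INTERRUPT_HIGH_IRQ_CONTEXT
+ *             handlers run right here, in interrupt context
+ *          -> amdgpu_dm_irq_schedule_work():   INTERRUPT_LOW_IRQ_CONTEXT
+ *             handlers run later, from dm_irq_work_func()
+ */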
+
+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+{
+ switch (type) {
+ case AMDGPU_HPD_1:
+ return DC_IRQ_SOURCE_HPD1;
+ case AMDGPU_HPD_2:
+ return DC_IRQ_SOURCE_HPD2;
+ case AMDGPU_HPD_3:
+ return DC_IRQ_SOURCE_HPD3;
+ case AMDGPU_HPD_4:
+ return DC_IRQ_SOURCE_HPD4;
+ case AMDGPU_HPD_5:
+ return DC_IRQ_SOURCE_HPD5;
+ case AMDGPU_HPD_6:
+ return DC_IRQ_SOURCE_HPD6;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
+ bool st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+ dc_interrupt_set(adev->dm.dc, src, st);
+ return 0;
+}
+
+static inline int dm_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned crtc_id,
+ enum amdgpu_interrupt_state state,
+ const enum irq_type dal_irq_type,
+ const char *func)
+{
+ bool st;
+ enum dc_irq_source irq_source;
+
+ struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
+
+ if (!acrtc) {
+ DRM_ERROR(
+ "%s: crtc is NULL at id :%d\n",
+ func,
+ crtc_id);
+ return 0;
+ }
+
+ irq_source = dal_irq_type + acrtc->otg_inst;
+
+ st = (state == AMDGPU_IRQ_STATE_ENABLE);
+
+ dc_interrupt_set(adev->dm.dc, irq_source, st);
+ return 0;
+}
+
+static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_PFLIP,
+ __func__);
+}
+
+static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_VBLANK,
+ __func__);
+}
+
+static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
+ .set = amdgpu_dm_set_crtc_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
+ .set = amdgpu_dm_set_pflip_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
+ .set = amdgpu_dm_set_hpd_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+
+ adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
+
+ adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
+}
+
+/*
+ * amdgpu_dm_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd,
+ true);
+ }
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd_rx,
+ true);
+ }
+ }
+}
+
+/**
+ * amdgpu_dm_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+
+ dc_interrupt_set(adev->dm.dc, dc_link->irq_source_hpd, false);
+
+ if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ dc_interrupt_set(adev->dm.dc,
+ dc_link->irq_source_hpd_rx,
+ false);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
new file mode 100644
index 000000000000..82f8e761beca
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_H__
+#define __AMDGPU_DM_IRQ_H__
+
+#include "irq_types.h" /* DAL irq definitions */
+
+/*
+ * Display Manager IRQ-related interfaces (for use by DAL).
+ */
+
+/**
+ * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM initialization.
+ *
+ * Returns:
+ * 0 - success
+ * non-zero - error
+ */
+int amdgpu_dm_irq_init(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM destruction.
+ *
+ */
+void amdgpu_dm_irq_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
+ *
+ * @adev: AMD DRM device
+ * @int_params: parameters for the irq
+ * @ih: pointer to the irq handler function
+ * @handler_args: arguments which will be passed to ih
+ *
+ * Returns:
+ * A handler pointer (to be passed to amdgpu_dm_irq_unregister_interrupt())
+ * on success.
+ * DAL_INVALID_IRQ_HANDLER_IDX on failure.
+ *
+ * Cannot be called from an interrupt handler.
+ */
+void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+ struct dc_interrupt_params *int_params,
+ void (*ih)(void *),
+ void *handler_args);
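+/*
+ * Typical usage (illustrative only; the handler function and its argument
+ * below are placeholder names, not part of this interface):
+ *
+ *	struct dc_interrupt_params int_params = {0};
+ *	void *handler;
+ *
+ *	int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ *	int_params.irq_source = DC_IRQ_SOURCE_HPD1;
+ *	handler = amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ *						    handle_hpd_irq, aconnector);
+ *	...
+ *	amdgpu_dm_irq_unregister_interrupt(adev, DC_IRQ_SOURCE_HPD1, handler);
+ */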
+
+/**
+ * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
+ * by amdgpu_dm_irq_register_interrupt().
+ *
+ * @adev: AMD DRM device.
+ * @ih_index: irq handler index which was returned by
+ * amdgpu_dm_irq_register_interrupt
+ */
+void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
+ enum dc_irq_source irq_source,
+ void *ih_index);
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
+
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
+ *
+ */
+int amdgpu_dm_irq_suspend(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume.
+ * amdgpu_dm_irq_resume_late - enable HPD ASIC interrupts during resume.
+ *
+ */
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev);
+int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_DM_IRQ_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
new file mode 100644
index 000000000000..707928b88448
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -0,0 +1,441 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/version.h>
+#include <drm/drm_atomic_helper.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_mst_types.h"
+
+#include "dc.h"
+#include "dm_helpers.h"
+
+#include "dc_link_ddc.h"
+
+/* #define TRACE_DPCD */
+
+#ifdef TRACE_DPCD
+#define SIDE_BAND_MSG(address) ((address) >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && (address) < DP_SINK_COUNT_ESI)
+
+static inline char *side_band_msg_type_to_str(uint32_t address)
+{
+ static char str[10] = {0};
+
+ if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
+ strcpy(str, "DOWN_REQ");
+ else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
+ strcpy(str, "UP_REP");
+ else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
+ strcpy(str, "DOWN_REP");
+ else
+ strcpy(str, "UP_REQ");
+
+ return str;
+}
+
+static void log_dpcd(uint8_t type,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size,
+ bool res)
+{
+ DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
+ (type == DP_AUX_NATIVE_READ) ||
+ (type == DP_AUX_I2C_READ) ?
+ "Read" : "Write",
+ address,
+ SIDE_BAND_MSG(address) ?
+ side_band_msg_type_to_str(address) : "Nop",
+ res ? "OK" : "Fail");
+
+ if (res) {
+ print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
+ }
+}
+#endif
+
+static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
+ I2C_MOT_TRUE : I2C_MOT_FALSE;
+ enum ddc_result res;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_READ:
+ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ false,
+ I2C_MOT_UNDEF,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_NATIVE_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ false,
+ I2C_MOT_UNDEF,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_I2C_READ:
+ res = dal_ddc_service_read_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ true,
+ mot,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_I2C_WRITE:
+ res = dal_ddc_service_write_dpcd_data(
+ TO_DM_AUX(aux)->ddc_service,
+ true,
+ mot,
+ msg->address,
+ msg->buffer,
+ msg->size);
+ break;
+ default:
+ return 0;
+ }
+
+#ifdef TRACE_DPCD
+ log_dpcd(msg->request,
+ msg->address,
+ msg->buffer,
+ msg->size,
+ res == DDC_RESULT_SUCESSFULL);
+#endif
+
+ return msg->size;
+}
+
+static enum drm_connector_status
+dm_dp_mst_detect(struct drm_connector *connector, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_dm_connector *master = aconnector->mst_port;
+
+ enum drm_connector_status status =
+ drm_dp_mst_detect_port(
+ connector,
+ &master->mst_mgr,
+ aconnector->port);
+
+ return status;
+}
+
+static void
+dm_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+
+ drm_encoder_cleanup(&amdgpu_encoder->base);
+ kfree(amdgpu_encoder);
+ drm_connector_cleanup(connector);
+ kfree(amdgpu_dm_connector);
+}
+
+static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
+ .detect = dm_dp_mst_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = dm_dp_mst_connector_destroy,
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property
+};
+
+static int dm_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ return drm_add_edid_modes(connector, edid);
+}
+
+static int dm_dp_mst_get_modes(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ int ret = 0;
+
+ if (!aconnector)
+ return dm_connector_update_modes(connector, NULL);
+
+ if (!aconnector->edid) {
+ struct edid *edid;
+ struct dc_sink *dc_sink;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+ if (!edid) {
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+ return ret;
+ }
+
+ aconnector->edid = edid;
+
+ dc_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ aconnector->dc_sink = dc_sink;
+
+ if (dc_sink) {
+ dc_sink->priv = aconnector;
+ amdgpu_dm_add_sink_to_freesync_module(
+ connector, edid);
+ }
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base, edid);
+ }
+
+ ret = dm_connector_update_modes(connector, aconnector->edid);
+
+ return ret;
+}
+
+static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ return &amdgpu_dm_connector->mst_encoder->base;
+}
+
+static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
+ .get_modes = dm_dp_mst_get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .best_encoder = dm_mst_best_encoder,
+};
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
+static struct amdgpu_encoder *
+dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_encoder *amdgpu_encoder;
+ struct drm_encoder *encoder;
+ const struct drm_connector_helper_funcs *connector_funcs =
+ connector->base.helper_private;
+ struct drm_encoder *enc_master =
+ connector_funcs->best_encoder(&connector->base);
+
+ DRM_DEBUG_KMS("enc master is %p\n", enc_master);
+ amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
+ if (!amdgpu_encoder)
+ return NULL;
+
+ encoder = &amdgpu_encoder->base;
+ encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ drm_encoder_init(
+ dev,
+ &amdgpu_encoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_DPMST,
+ NULL);
+
+ drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
+
+ return amdgpu_encoder;
+}
+
+static struct drm_connector *
+dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ const char *pathprop)
+{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->mst_port == master
+ && !aconnector->port) {
+ DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ aconnector->port = port;
+ drm_mode_connector_set_path_property(connector, pathprop);
+
+ drm_connector_list_iter_end(&conn_iter);
+ return &aconnector->base;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ return NULL;
+
+ connector = &aconnector->base;
+ aconnector->port = port;
+ aconnector->mst_port = master;
+
+ if (drm_connector_init(
+ dev,
+ connector,
+ &dm_dp_mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort)) {
+ kfree(aconnector);
+ return NULL;
+ }
+ drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
+
+ amdgpu_dm_connector_init_helper(
+ &adev->dm,
+ aconnector,
+ DRM_MODE_CONNECTOR_DisplayPort,
+ master->dc_link,
+ master->connector_id);
+
+ aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
+
+ /*
+ * TODO: understand why this one is needed
+ */
+ drm_object_attach_property(
+ &connector->base,
+ dev->mode_config.path_property,
+ 0);
+ drm_object_attach_property(
+ &connector->base,
+ dev->mode_config.tile_property,
+ 0);
+
+ drm_mode_connector_set_path_property(connector, pathprop);
+
+ /*
+ * Initialize connector state before adding the connector to drm and
+ * framebuffer lists
+ */
+ amdgpu_dm_connector_funcs_reset(connector);
+
+ DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ DRM_DEBUG_KMS(":%d\n", connector->base.id);
+
+ return connector;
+}
+
+static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
+ aconnector, connector->base.id, aconnector->mst_port);
+
+ aconnector->port = NULL;
+ if (aconnector->dc_sink) {
+ amdgpu_dm_remove_sink_from_freesync_module(connector);
+ dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ }
+ if (aconnector->edid) {
+ kfree(aconnector->edid);
+ aconnector->edid = NULL;
+ }
+
+ drm_mode_connector_update_edid_property(
+ &aconnector->base,
+ NULL);
+}
+
+static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+static void dm_dp_mst_register_connector(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ if (adev->mode_info.rfbdev)
+ drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
+ else
+ DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
+
+ drm_connector_register(connector);
+
+}
+
+static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
+ .add_connector = dm_dp_add_mst_connector,
+ .destroy_connector = dm_dp_destroy_mst_connector,
+ .hotplug = dm_dp_mst_hotplug,
+ .register_connector = dm_dp_mst_register_connector
+};
+
+void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector)
+{
+ aconnector->dm_dp_aux.aux.name = "dmdc";
+ aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
+ aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
+ aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
+
+ drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ aconnector->mst_mgr.cbs = &dm_mst_cbs;
+ drm_dp_mst_topology_mgr_init(
+ &aconnector->mst_mgr,
+ dm->adev->ddev,
+ &aconnector->dm_dp_aux.aux,
+ 16,
+ 4,
+ aconnector->connector_id);
+}
+
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
new file mode 100644
index 000000000000..2da851b40042
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__
+#define __DAL_AMDGPU_DM_MST_TYPES_H__
+
+struct amdgpu_display_manager;
+struct amdgpu_dm_connector;
+
+void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
new file mode 100644
index 000000000000..5df8fd5b537c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+
+unsigned long long dm_get_timestamp(struct dc_context *ctx)
+{
+ /* TODO: return actual timestamp */
+ return 0;
+}
+
+bool dm_write_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag)
+{
+ /*TODO implement*/
+ return false;
+}
+
+bool dm_read_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag)
+{
+ /*TODO implement*/
+ return false;
+}
+
+/**** power component interfaces ****/
+
+bool dm_pp_pre_dce_clock_change(
+ struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *requested_state,
+ struct dm_pp_gpu_clock_range *actual_state)
+{
+ /*TODO*/
+ return false;
+}
+
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+
+ if (adev->pm.dpm_enabled) {
+
+ memset(&adev->pm.pm_display_cfg, 0,
+ sizeof(adev->pm.pm_display_cfg));
+
+ adev->pm.pm_display_cfg.cpu_cc6_disable =
+ pp_display_cfg->cpu_cc6_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_disable =
+ pp_display_cfg->cpu_pstate_disable;
+
+ adev->pm.pm_display_cfg.cpu_pstate_separation_time =
+ pp_display_cfg->cpu_pstate_separation_time;
+
+ adev->pm.pm_display_cfg.nb_pstate_switch_disable =
+ pp_display_cfg->nb_pstate_switch_disable;
+
+ adev->pm.pm_display_cfg.num_display =
+ pp_display_cfg->display_count;
+ adev->pm.pm_display_cfg.num_path_including_non_display =
+ pp_display_cfg->display_count;
+
+ adev->pm.pm_display_cfg.min_core_set_clock =
+ pp_display_cfg->min_engine_clock_khz/10;
+ adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
+ pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
+ adev->pm.pm_display_cfg.min_mem_set_clock =
+ pp_display_cfg->min_memory_clock_khz/10;
+
+ adev->pm.pm_display_cfg.multi_monitor_in_sync =
+ pp_display_cfg->all_displays_in_sync;
+ adev->pm.pm_display_cfg.min_vblank_time =
+ pp_display_cfg->avail_mclk_switch_time_us;
+
+ adev->pm.pm_display_cfg.display_clk =
+ pp_display_cfg->disp_clk_khz/10;
+
+ adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;
+
+ adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
+ adev->pm.pm_display_cfg.line_time_in_us =
+ pp_display_cfg->line_time_in_us;
+
+ adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
+ adev->pm.pm_display_cfg.crossfire_display_index = -1;
+ adev->pm.pm_display_cfg.min_bus_bandwidth = 0;
+
+ /* TODO: complete implementation of
+ * amd_powerplay_display_configuration_change().
+ * Follow example of:
+ * PHM_StoreDALConfigurationData - powerplay\hwmgr\hardwaremanager.c
+ * PP_IRI_DisplayConfigurationChange - powerplay\eventmgr\iri.c */
+ amd_powerplay_display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+
+ /* TODO: replace by a separate call to 'apply display cfg'? */
+ amdgpu_pm_compute_clocks(adev);
+ }
+
+ return true;
+}
+
+bool dc_service_get_system_clocks_range(
+ const struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *sys_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+
+ /* Default values, in case PPLib is not compiled-in. */
+ sys_clks->mclk.max_khz = 800000;
+ sys_clks->mclk.min_khz = 800000;
+
+ sys_clks->sclk.max_khz = 600000;
+ sys_clks->sclk.min_khz = 300000;
+
+ if (adev->pm.dpm_enabled) {
+ sys_clks->mclk.max_khz = amdgpu_dpm_get_mclk(adev, false);
+ sys_clks->mclk.min_khz = amdgpu_dpm_get_mclk(adev, true);
+
+ sys_clks->sclk.max_khz = amdgpu_dpm_get_sclk(adev, false);
+ sys_clks->sclk.min_khz = amdgpu_dpm_get_sclk(adev, true);
+ }
+
+ return true;
+}
+
+static void get_default_clock_levels(
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clks)
+{
+ uint32_t disp_clks_in_khz[6] = {
+ 300000, 400000, 496560, 626090, 685720, 757900 };
+ uint32_t sclks_in_khz[6] = {
+ 300000, 360000, 423530, 514290, 626090, 720000 };
+ uint32_t mclks_in_khz[2] = { 333000, 800000 };
+
+ switch (clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, disp_clks_in_khz,
+ sizeof(disp_clks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ clks->num_levels = 6;
+ memmove(clks->clocks_in_khz, sclks_in_khz,
+ sizeof(sclks_in_khz));
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ clks->num_levels = 2;
+ memmove(clks->clocks_in_khz, mclks_in_khz,
+ sizeof(mclks_in_khz));
+ break;
+ default:
+ clks->num_levels = 0;
+ break;
+ }
+}
+
+static enum amd_pp_clock_type dc_to_pp_clock_type(
+ enum dm_pp_clock_type dm_pp_clk_type)
+{
+ enum amd_pp_clock_type amd_pp_clk_type = 0;
+
+ switch (dm_pp_clk_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ amd_pp_clk_type = amd_pp_disp_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_ENGINE_CLK:
+ amd_pp_clk_type = amd_pp_sys_clock;
+ break;
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ amd_pp_clk_type = amd_pp_mem_clock;
+ break;
+ default:
+ DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
+ dm_pp_clk_type);
+ break;
+ }
+
+ return amd_pp_clk_type;
+}
+
+static void pp_to_dc_clock_levels(
+ const struct amd_pp_clocks *pp_clks,
+ struct dm_pp_clock_levels *dc_clks,
+ enum dm_pp_clock_type dc_clk_type)
+{
+ uint32_t i;
+
+ if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
+ DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
+ pp_clks->count,
+ DM_PP_MAX_CLOCK_LEVELS);
+
+ dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
+ } else
+ dc_clks->num_levels = pp_clks->count;
+
+ DRM_INFO("DM_PPLIB: values for %s clock\n",
+ DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
+
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
+ /* translate 10kHz to kHz */
+ dc_clks->clocks_in_khz[i] = pp_clks->clock[i] * 10;
+ }
+}
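+/*
+ * For example, a PPLib clock level of 30000 (in units of 10 kHz) is reported
+ * to DC as 300000 kHz; if PPLib returns more than DM_PP_MAX_CLOCK_LEVELS
+ * levels, the list is truncated to that maximum.
+ */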
+
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *dc_clks)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct amd_pp_clocks pp_clks = { 0 };
+ struct amd_pp_simple_clock_info validation_clks = { 0 };
+ uint32_t i;
+
+ if (amd_powerplay_get_clock_by_type(pp_handle,
+ dc_to_pp_clock_type(clk_type), &pp_clks)) {
+ /* Error in pplib. Provide default values. */
+ get_default_clock_levels(clk_type, dc_clks);
+ return true;
+ }
+
+ pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
+
+ if (amd_powerplay_get_display_mode_validation_clocks(pp_handle,
+ &validation_clks)) {
+ /* Error in pplib. Provide default values. */
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
+
+ DRM_INFO("DM_PPLIB: Validation clocks:\n");
+ DRM_INFO("DM_PPLIB: engine_max_clock: %d\n",
+ validation_clks.engine_max_clock);
+ DRM_INFO("DM_PPLIB: memory_max_clock: %d\n",
+ validation_clks.memory_max_clock);
+ DRM_INFO("DM_PPLIB: level : %d\n",
+ validation_clks.level);
+
+ /* Translate 10 kHz to kHz. */
+ validation_clks.engine_max_clock *= 10;
+ validation_clks.memory_max_clock *= 10;
+
+ /* Determine the highest non-boosted level from the Validation Clocks */
+ if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
+ /* This clock is higher than the validation clock.
+ * That means the previous one is the highest
+ * non-boosted level. */
+ DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
+ for (i = 0; i < dc_clks->num_levels; i++) {
+ if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
+ DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
+ dc_clks->num_levels, i);
+ dc_clks->num_levels = i > 0 ? i : 1;
+ break;
+ }
+ }
+ }
+
+ return true;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info)
+{
+ /* TODO: to be implemented */
+ return false;
+}
+
+void dm_pp_get_funcs_rv(
+ struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs)
+{}
+
+/**** end of power component interfaces ****/
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
new file mode 100644
index 000000000000..4f83e3011743
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -0,0 +1,33 @@
+#
+# Makefile for Display Core (dc) component.
+#
+
+DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
+
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+DC_LIBS += dcn10 dml
+endif
+
+DC_LIBS += dce120
+
+DC_LIBS += dce112
+DC_LIBS += dce110
+DC_LIBS += dce100
+DC_LIBS += dce80
+
+AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS)))
+
+include $(AMD_DC)
+
+DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
+dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o
+
+AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
+
+AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
+
+AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
+AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
+
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
new file mode 100644
index 000000000000..43c5ccdeeb72
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the 'basics' sub-component of DAL.
+# It provides the general basic services required by other DAL
+# subcomponents.
+
+BASICS = conversion.o fixpt31_32.o fixpt32_32.o grph_object_id.o \
+ logger.o log_helpers.o vector.o
+
+AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BASICS)
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
new file mode 100644
index 000000000000..23c9a0ec0181
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#define DIVIDER 10000
+
+/* S2D13 values are clamped to the range [-3.00 ... 3.00] */
+#define S2D13_MIN (-3 * DIVIDER)
+#define S2D13_MAX (3 * DIVIDER)
+
+uint16_t fixed_point_to_int_frac(
+ struct fixed31_32 arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits)
+{
+ int32_t numerator;
+ int32_t divisor = 1 << fractional_bits;
+
+ uint16_t result;
+
+ uint16_t d = (uint16_t)dal_fixed31_32_floor(
+ dal_fixed31_32_abs(
+ arg));
+
+ if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
+ numerator = (uint16_t)dal_fixed31_32_floor(
+ dal_fixed31_32_mul_int(
+ arg,
+ divisor));
+ else {
+ numerator = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(
+ dal_fixed31_32_from_int(
+ 1LL << integer_bits),
+ dal_fixed31_32_recip(
+ dal_fixed31_32_from_int(
+ divisor))));
+ }
+
+ if (numerator >= 0)
+ result = (uint16_t)numerator;
+ else
+ result = (uint16_t)(
+ (1 << (integer_bits + fractional_bits + 1)) + numerator);
+
+ if ((result != 0) && dal_fixed31_32_lt(
+ arg, dal_fixed31_32_zero))
+ result |= 1 << (integer_bits + fractional_bits);
+
+ return result;
+}
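+/*
+ * For example, with integer_bits = 2 and fractional_bits = 13 (the S2D13
+ * format used below), 1.0 converts to 0x2000 (1 << 13) and -1.0 converts
+ * to 0xE000, the 16-bit two's complement of 0x2000.
+ */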
+/*
+ * convert_float_matrix - convert an array of fixed31_32 values into the
+ * HW register format S2D13 (sign bit, 2 integer bits, 13 fractional bits),
+ * clamping each value to the [-3.00 ... 3.00] range first.
+ */
+void convert_float_matrix(
+ uint16_t *matrix,
+ struct fixed31_32 *flt,
+ uint32_t buffer_size)
+{
+ const struct fixed31_32 min_2_13 =
+ dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER);
+ const struct fixed31_32 max_2_13 =
+ dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER);
+ uint32_t i;
+
+ for (i = 0; i < buffer_size; ++i) {
+ uint32_t reg_value =
+ fixed_point_to_int_frac(
+ dal_fixed31_32_clamp(
+ flt[i],
+ min_2_13,
+ max_2_13),
+ 2,
+ 13);
+
+ matrix[i] = (uint16_t)reg_value;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.h b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
new file mode 100644
index 000000000000..ade785c4fdc7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_CONVERSION_H__
+#define __DAL_CONVERSION_H__
+
+#include "include/fixed31_32.h"
+
+uint16_t fixed_point_to_int_frac(
+ struct fixed31_32 arg,
+ uint8_t integer_bits,
+ uint8_t fractional_bits);
+
+void convert_float_matrix(
+ uint16_t *matrix,
+ struct fixed31_32 *flt,
+ uint32_t buffer_size);
+
+static inline unsigned int log_2(unsigned int num)
+{
+ return ilog2(num);
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
new file mode 100644
index 000000000000..26936892c6f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed31_32.h"
+
+static inline uint64_t abs_i64(
+ int64_t arg)
+{
+ if (arg > 0)
+ return (uint64_t)arg;
+ else
+ return (uint64_t)(-arg);
+}
+
+/*
+ * @brief
+ * result = dividend / divisor
+ * *remainder = dividend % divisor
+ */
+static inline uint64_t complete_integer_division_u64(
+ uint64_t dividend,
+ uint64_t divisor,
+ uint64_t *remainder)
+{
+ uint64_t result;
+
+ ASSERT(divisor);
+
+ result = div64_u64_rem(dividend, divisor, remainder);
+
+ return result;
+}
+
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+ ((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+struct fixed31_32 dal_fixed31_32_from_fraction(
+ int64_t numerator,
+ int64_t denominator)
+{
+ struct fixed31_32 res;
+
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+
+ uint64_t arg1_value = arg1_negative ? -numerator : numerator;
+ uint64_t arg2_value = arg2_negative ? -denominator : denominator;
+
+ uint64_t remainder;
+
+ /* determine integer part */
+
+ uint64_t res_value = complete_integer_division_u64(
+ arg1_value, arg2_value, &remainder);
+
+ ASSERT(res_value <= LONG_MAX);
+
+ /* determine fractional part */
+ {
+ uint32_t i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint64_t summand = (remainder << 1) >= arg2_value;
+
+ ASSERT(res_value <= LLONG_MAX - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (int64_t)res_value;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
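+/*
+ * For example, dal_fixed31_32_from_fraction(1, 2) yields the raw value
+ * 0x0000000080000000 (0.5 in Q31.32), and dal_fixed31_32_from_fraction(1, 3)
+ * yields 0x0000000055555555, the nearest Q31.32 approximation of 1/3.
+ */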
+
+struct fixed31_32 dal_fixed31_32_from_int_nonconst(
+ int64_t arg)
+{
+ struct fixed31_32 res;
+
+ ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX));
+
+ res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_shl(
+ struct fixed31_32 arg,
+ uint8_t shift)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+ ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift)));
+
+ res.value = arg.value << shift;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+ ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+ ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ struct fixed31_32 res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value;
+ uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value;
+
+ uint64_t arg1_int = GET_INTEGER_PART(arg1_value);
+ uint64_t arg2_int = GET_INTEGER_PART(arg2_value);
+
+ uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ uint64_t tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ ASSERT(res.value <= LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sqr(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 res;
+
+ uint64_t arg_value = abs_i64(arg.value);
+
+ uint64_t arg_int = GET_INTEGER_PART(arg_value);
+
+ uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value);
+
+ uint64_t tmp;
+
+ res.value = arg_int * arg_int;
+
+ ASSERT(res.value <= LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg_int * arg_fra;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg_fra * arg_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)dal_fixed31_32_half.value);
+
+ ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_recip(
+ struct fixed31_32 arg)
+{
+	/*
+	 * @note
+	 * A Newton-Raphson reciprocal iteration would avoid the full
+	 * division performed by dal_fixed31_32_from_fraction() below.
+	 */
+
+ ASSERT(arg.value);
+
+ return dal_fixed31_32_from_fraction(
+ dal_fixed31_32_one.value,
+ arg.value);
+}
+
+struct fixed31_32 dal_fixed31_32_sinc(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 square;
+
+ struct fixed31_32 res = dal_fixed31_32_one;
+
+ int32_t n = 27;
+
+ struct fixed31_32 arg_norm = arg;
+
+ if (dal_fixed31_32_le(
+ dal_fixed31_32_two_pi,
+ dal_fixed31_32_abs(arg))) {
+ arg_norm = dal_fixed31_32_sub(
+ arg_norm,
+ dal_fixed31_32_mul_int(
+ dal_fixed31_32_two_pi,
+ (int32_t)div64_s64(
+ arg_norm.value,
+ dal_fixed31_32_two_pi.value)));
+ }
+
+ square = dal_fixed31_32_sqr(arg_norm);
+
+ do {
+ res = dal_fixed31_32_sub(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n > 2);
+
+ if (arg.value != arg_norm.value)
+ res = dal_fixed31_32_div(
+ dal_fixed31_32_mul(res, arg_norm),
+ arg);
+
+ return res;
+}
+
+struct fixed31_32 dal_fixed31_32_sin(
+ struct fixed31_32 arg)
+{
+ return dal_fixed31_32_mul(
+ arg,
+ dal_fixed31_32_sinc(arg));
+}
+
+struct fixed31_32 dal_fixed31_32_cos(
+ struct fixed31_32 arg)
+{
+ /* TODO implement argument normalization */
+
+ const struct fixed31_32 square = dal_fixed31_32_sqr(arg);
+
+ struct fixed31_32 res = dal_fixed31_32_one;
+
+ int32_t n = 26;
+
+ do {
+ res = dal_fixed31_32_sub(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n != 0);
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = exp(arg),
+ * where abs(arg) < 1
+ *
+ * Calculated as Taylor series.
+ */
+static struct fixed31_32 fixed31_32_exp_from_taylor_series(
+ struct fixed31_32 arg)
+{
+ uint32_t n = 9;
+
+ struct fixed31_32 res = dal_fixed31_32_from_fraction(
+ n + 2,
+ n + 1);
+ /* TODO find correct res */
+
+ ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one));
+
+ do
+ res = dal_fixed31_32_add(
+ dal_fixed31_32_one,
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_mul(
+ arg,
+ res),
+ n));
+ while (--n != 1);
+
+ return dal_fixed31_32_add(
+ dal_fixed31_32_one,
+ dal_fixed31_32_mul(
+ arg,
+ res));
+}
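+
+/*
+ * The loop above evaluates the truncated series in Horner form:
+ *   exp(x) ~= 1 + x*(1 + x/2*(1 + x/3*(... (1 + x/9*(11/10)) ...)))
+ * which converges quickly because |arg| < 1 here.
+ */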
+
+struct fixed31_32 dal_fixed31_32_exp(
+ struct fixed31_32 arg)
+{
+ /*
+ * @brief
+ * Main equation is:
+ * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
+ * where m = round(x / ln(2)), r = x - m * ln(2)
+ */
+
+ if (dal_fixed31_32_le(
+ dal_fixed31_32_ln2_div_2,
+ dal_fixed31_32_abs(arg))) {
+ int32_t m = dal_fixed31_32_round(
+ dal_fixed31_32_div(
+ arg,
+ dal_fixed31_32_ln2));
+
+ struct fixed31_32 r = dal_fixed31_32_sub(
+ arg,
+ dal_fixed31_32_mul_int(
+ dal_fixed31_32_ln2,
+ m));
+
+ ASSERT(m != 0);
+
+ ASSERT(dal_fixed31_32_lt(
+ dal_fixed31_32_abs(r),
+ dal_fixed31_32_one));
+
+ if (m > 0)
+ return dal_fixed31_32_shl(
+ fixed31_32_exp_from_taylor_series(r),
+ (uint8_t)m);
+ else
+ return dal_fixed31_32_div_int(
+ fixed31_32_exp_from_taylor_series(r),
+ 1LL << -m);
+ } else if (arg.value != 0)
+ return fixed31_32_exp_from_taylor_series(arg);
+ else
+ return dal_fixed31_32_one;
+}
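+
+/*
+ * Illustration of the range reduction above (values rounded): for x = 3.0,
+ * m = round(3.0 / ln(2)) = 4 and r = 3.0 - 4 * ln(2) ~= 0.227, so exp(3.0)
+ * is computed as (1 << 4) * exp(0.227) ~= 16 * 1.255 ~= 20.09.
+ */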
+
+struct fixed31_32 dal_fixed31_32_log(
+ struct fixed31_32 arg)
+{
+ struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one);
+ /* TODO improve 1st estimation */
+
+ struct fixed31_32 error;
+
+ ASSERT(arg.value > 0);
+ /* TODO if arg is negative, return NaN */
+ /* TODO if arg is zero, return -INF */
+
+ do {
+ struct fixed31_32 res1 = dal_fixed31_32_add(
+ dal_fixed31_32_sub(
+ res,
+ dal_fixed31_32_one),
+ dal_fixed31_32_div(
+ arg,
+ dal_fixed31_32_exp(res)));
+
+ error = dal_fixed31_32_sub(
+ res,
+ res1);
+
+ res = res1;
+ /* TODO determine max_allowed_error based on quality of exp() */
+ } while (abs_i64(error.value) > 100ULL);
+
+ return res;
+}
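+
+/*
+ * The iteration above is Newton's method applied to f(x) = exp(x) - arg:
+ *   x_next = x - (exp(x) - arg) / exp(x) = x - 1 + arg / exp(x)
+ * and it stops once successive estimates differ by no more than ~100 units
+ * of the fractional part.
+ */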
+
+struct fixed31_32 dal_fixed31_32_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return dal_fixed31_32_exp(
+ dal_fixed31_32_mul(
+ dal_fixed31_32_log(arg1),
+ arg2));
+}
+
+int32_t dal_fixed31_32_floor(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_round(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ const int64_t summand = dal_fixed31_32_half.value;
+
+ ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+int32_t dal_fixed31_32_ceil(
+ struct fixed31_32 arg)
+{
+ uint64_t arg_value = abs_i64(arg.value);
+
+ const int64_t summand = dal_fixed31_32_one.value -
+ dal_fixed31_32_epsilon.value;
+
+ ASSERT(LLONG_MAX - (int64_t)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int32_t)GET_INTEGER_PART(arg_value);
+ else
+ return -(int32_t)GET_INTEGER_PART(arg_value);
+}
+
+/* This is a generic helper that translates a fixed point value into an
+ * integer format made up of integer_bits of integer part and
+ * fractional_bits of fractional part. For example, dal_fixed31_32_u2d19()
+ * uses it to produce a 2-bit integer part and a 19-bit fractional part in
+ * 32 bits. It is used in HW programming (scaler).
+ */
+
+static inline uint32_t ux_dy(
+ int64_t value,
+ uint32_t integer_bits,
+ uint32_t fractional_bits)
+{
+ /* 1. create mask of integer part */
+ uint32_t result = (1 << integer_bits) - 1;
+ /* 2. mask out fractional part */
+ uint32_t fractional_part = FRACTIONAL_PART_MASK & value;
+	/* 3. shrink fixed point integer part to be of integer_bits width */
+ result &= GET_INTEGER_PART(value);
+ /* 4. make space for fractional part to be filled in after integer */
+ result <<= fractional_bits;
+	/* 5. shrink fixed point fractional part to be of fractional_bits width */
+ fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits;
+ /* 6. merge the result */
+ return result | fractional_part;
+}
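+
+/*
+ * For illustration (assuming 32 fractional bits): converting the value 1.5
+ * (0x0000000180000000) with ux_dy(value, 2, 19) keeps the low two integer
+ * bits (01) and the top 19 fractional bits (1000...0), producing 0x000C0000
+ * in U2.19 format.
+ */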
+
+uint32_t dal_fixed31_32_u2d19(
+ struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 2, 19);
+}
+
+uint32_t dal_fixed31_32_u0d19(
+ struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 0, 19);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
new file mode 100644
index 000000000000..4d3aaa82a07b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/fixed32_32.h"
+
+static uint64_t u64_div(uint64_t n, uint64_t d)
+{
+ uint32_t i = 0;
+ uint64_t r;
+ uint64_t q = div64_u64_rem(n, d, &r);
+
+ for (i = 0; i < 32; ++i) {
+ uint64_t sbit = q & (1ULL<<63);
+
+ r <<= 1;
+ r |= sbit ? 1 : 0;
+ q <<= 1;
+ if (r >= d) {
+ r -= d;
+ q |= 1;
+ }
+ }
+
+ if (2*r >= d)
+ q += 1;
+ return q;
+}
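+
+/*
+ * The helper above extends div64_u64_rem() by generating 32 additional
+ * quotient bits from the remainder, so dividing two 32.32 values yields a
+ * 32.32 quotient.  For example, u64_div(3ULL << 32, 2ULL << 32) returns
+ * 0x180000000, i.e. 1.5 in 32.32 format.
+ */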
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx = {lhs.value + rhs.value};
+
+ ASSERT(fx.value >= rhs.value);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)};
+
+ ASSERT(fx.value >= (uint64_t)rhs << 32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_sub(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+
+ ASSERT(lhs.value >= rhs.value);
+ fx.value = lhs.value - rhs.value;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+
+ ASSERT(lhs.value >= ((uint64_t)rhs<<32));
+ fx.value = lhs.value - ((uint64_t)rhs<<32);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_mul(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+ uint64_t lhs_int = lhs.value>>32;
+ uint64_t lhs_frac = (uint32_t)lhs.value;
+ uint64_t rhs_int = rhs.value>>32;
+ uint64_t rhs_frac = (uint32_t)rhs.value;
+ uint64_t ahbh = lhs_int * rhs_int;
+ uint64_t ahbl = lhs_int * rhs_frac;
+ uint64_t albh = lhs_frac * rhs_int;
+ uint64_t albl = lhs_frac * rhs_frac;
+
+ ASSERT((ahbh>>32) == 0);
+
+ fx.value = (ahbh<<32) + ahbl + albh + (albl>>32);
+ return fx;
+}
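+
+/*
+ * As in the fixed31_32 variant, the product above is assembled from the
+ * four partial products of the 32-bit integer and fractional halves; the
+ * high product ahbh must fit in 32 bits, otherwise the result overflows
+ * (hence the ASSERT).
+ */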
+
+struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+ uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs;
+ uint64_t lhsf;
+
+ ASSERT((lhsi>>32) == 0);
+ lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs;
+ ASSERT((lhsi<<32) + lhsf >= lhsf);
+ fx.value = (lhsi<<32) + lhsf;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div(lhs.value, rhs.value);
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ struct fixed32_32 fx;
+
+ fx.value = u64_div(lhs.value, (uint64_t)rhs << 32);
+ return fx;
+}
+
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 v)
+{
+ ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true);
+ return (v.value>>32) + ((uint32_t)v.value ? 1 : 0);
+}
+
+uint32_t dal_fixed32_32_round(struct fixed32_32 v)
+{
+ ASSERT(v.value + (1ULL<<31) >= (1ULL<<31));
+ return (v.value + (1ULL<<31))>>32;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c b/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
new file mode 100644
index 000000000000..147822545252
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/grph_object_id.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/grph_object_id.h"
+
+static bool dal_graphics_object_id_is_valid(struct graphics_object_id id)
+{
+ bool rc = true;
+
+ switch (id.type) {
+ case OBJECT_TYPE_UNKNOWN:
+ rc = false;
+ break;
+ case OBJECT_TYPE_GPU:
+ case OBJECT_TYPE_ENGINE:
+ /* do NOT check for id.id == 0 */
+ if (id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ default:
+ if (id.id == 0 || id.enum_id == ENUM_ID_UNKNOWN)
+ rc = false;
+ break;
+ }
+
+ return rc;
+}
+
+bool dal_graphics_object_id_is_equal(
+ struct graphics_object_id id1,
+ struct graphics_object_id id2)
+{
+	if (!dal_graphics_object_id_is_valid(id1)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id1'!\n", __func__);
+ return false;
+ }
+
+	if (!dal_graphics_object_id_is_valid(id2)) {
+ dm_output_to_console(
+ "%s: Warning: comparing invalid object 'id2'!\n", __func__);
+ return false;
+ }
+
+ if (id1.id == id2.id && id1.enum_id == id2.enum_id
+ && id1.type == id2.type)
+ return true;
+
+ return false;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
new file mode 100644
index 000000000000..6e43168fbdd6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "logger.h"
+#include "include/logger_interface.h"
+#include "dm_helpers.h"
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+struct dc_signal_type_info {
+ enum signal_type type;
+ char name[MAX_NAME_LEN];
+};
+
+static const struct dc_signal_type_info signal_type_info_tbl[] = {
+ {SIGNAL_TYPE_NONE, "NC"},
+ {SIGNAL_TYPE_DVI_SINGLE_LINK, "DVI"},
+ {SIGNAL_TYPE_DVI_DUAL_LINK, "DDVI"},
+ {SIGNAL_TYPE_HDMI_TYPE_A, "HDMIA"},
+ {SIGNAL_TYPE_LVDS, "LVDS"},
+ {SIGNAL_TYPE_RGB, "VGA"},
+ {SIGNAL_TYPE_DISPLAY_PORT, "DP"},
+ {SIGNAL_TYPE_DISPLAY_PORT_MST, "MST"},
+ {SIGNAL_TYPE_EDP, "eDP"},
+ {SIGNAL_TYPE_VIRTUAL, "Virtual"}
+};
+
+void dc_conn_log(struct dc_context *ctx,
+ const struct dc_link *link,
+ uint8_t *hex_data,
+ int hex_data_count,
+ enum dc_log_type event,
+ const char *msg,
+ ...)
+{
+ int i;
+ va_list args;
+ struct log_entry entry = { 0 };
+ enum signal_type signal;
+
+ if (link->local_sink)
+ signal = link->local_sink->sink_signal;
+ else
+ signal = link->connector_signal;
+
+ if (link->type == dc_connection_mst_branch)
+ signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+
+	va_start(args, msg);
+
+	dm_logger_open(ctx->logger, &entry, event);
+
+ for (i = 0; i < NUM_ELEMENTS(signal_type_info_tbl); i++)
+ if (signal == signal_type_info_tbl[i].type)
+ break;
+
+ if (i == NUM_ELEMENTS(signal_type_info_tbl))
+ goto fail;
+
+ dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
+ signal_type_info_tbl[i].name,
+ link->link_index);
+
+ entry.buf_offset += dm_log_to_buffer(
+ &entry.buf[entry.buf_offset],
+ LOG_MAX_LINE_SIZE - entry.buf_offset,
+ msg, args);
+
+ if (entry.buf[strlen(entry.buf) - 1] == '\n') {
+ entry.buf[strlen(entry.buf) - 1] = '\0';
+ entry.buf_offset--;
+ }
+
+ if (hex_data)
+ for (i = 0; i < hex_data_count; i++)
+ dm_logger_append(&entry, "%2.2X ", hex_data[i]);
+
+ dm_logger_append(&entry, "^\n");
+ dm_helpers_dc_conn_log(ctx, &entry, event);
+
+fail:
+ dm_logger_close(&entry);
+
+ va_end(args);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c
new file mode 100644
index 000000000000..e04e8ecd4874
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "include/logger_interface.h"
+#include "logger.h"
+
+
+#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+
+static const struct dc_log_type_info log_type_info_tbl[] = {
+ {LOG_ERROR, "Error"},
+ {LOG_WARNING, "Warning"},
+ {LOG_DEBUG, "Debug"},
+ {LOG_DC, "DC_Interface"},
+ {LOG_SURFACE, "Surface"},
+ {LOG_HW_HOTPLUG, "HW_Hotplug"},
+ {LOG_HW_LINK_TRAINING, "HW_LKTN"},
+ {LOG_HW_SET_MODE, "HW_Mode"},
+ {LOG_HW_RESUME_S3, "HW_Resume"},
+ {LOG_HW_AUDIO, "HW_Audio"},
+ {LOG_HW_HPD_IRQ, "HW_HPDIRQ"},
+ {LOG_MST, "MST"},
+ {LOG_SCALER, "Scaler"},
+ {LOG_BIOS, "BIOS"},
+ {LOG_BANDWIDTH_CALCS, "BWCalcs"},
+ {LOG_BANDWIDTH_VALIDATION, "BWValidation"},
+ {LOG_I2C_AUX, "I2C_AUX"},
+ {LOG_SYNC, "Sync"},
+ {LOG_BACKLIGHT, "Backlight"},
+ {LOG_FEATURE_OVERRIDE, "Override"},
+ {LOG_DETECTION_EDID_PARSER, "Edid"},
+ {LOG_DETECTION_DP_CAPS, "DP_Caps"},
+ {LOG_RESOURCE, "Resource"},
+ {LOG_DML, "DML"},
+ {LOG_EVENT_MODE_SET, "Mode"},
+ {LOG_EVENT_DETECTION, "Detect"},
+ {LOG_EVENT_LINK_TRAINING, "LKTN"},
+ {LOG_EVENT_LINK_LOSS, "LinkLoss"},
+ {LOG_EVENT_UNDERFLOW, "Underflow"},
+ {LOG_IF_TRACE, "InterfaceTrace"},
+ {LOG_DTN, "DTN"}
+};
+
+
+/* ----------- Object init and destruction ----------- */
+static bool construct(struct dc_context *ctx, struct dal_logger *logger,
+ uint32_t log_mask)
+{
+ /* malloc buffer and init offsets */
+ logger->log_buffer_size = DAL_LOGGER_BUFFER_MAX_SIZE;
+	logger->log_buffer = kzalloc(logger->log_buffer_size, GFP_KERNEL);
+
+ if (!logger->log_buffer)
+ return false;
+
+ /* Initialize both offsets to start of buffer (empty) */
+ logger->buffer_read_offset = 0;
+ logger->buffer_write_offset = 0;
+
+ logger->open_count = 0;
+
+ logger->flags.bits.ENABLE_CONSOLE = 1;
+ logger->flags.bits.ENABLE_BUFFER = 0;
+
+ logger->ctx = ctx;
+
+ logger->mask = log_mask;
+
+ return true;
+}
+
+static void destruct(struct dal_logger *logger)
+{
+ if (logger->log_buffer) {
+ kfree(logger->log_buffer);
+ logger->log_buffer = NULL;
+ }
+}
+
+struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask)
+{
+ /* malloc struct */
+ struct dal_logger *logger = kzalloc(sizeof(struct dal_logger),
+ GFP_KERNEL);
+
+ if (!logger)
+ return NULL;
+ if (!construct(ctx, logger, log_mask)) {
+ kfree(logger);
+ return NULL;
+ }
+
+ return logger;
+}
+
+uint32_t dal_logger_destroy(struct dal_logger **logger)
+{
+ if (logger == NULL || *logger == NULL)
+ return 1;
+ destruct(*logger);
+ kfree(*logger);
+ *logger = NULL;
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+
+static bool dal_logger_should_log(
+ struct dal_logger *logger,
+ enum dc_log_type log_type)
+{
+ if (logger->mask & (1 << log_type))
+ return true;
+
+ return false;
+}
+
+static void log_to_debug_console(struct log_entry *entry)
+{
+ struct dal_logger *logger = entry->logger;
+
+ if (logger->flags.bits.ENABLE_CONSOLE == 0)
+ return;
+
+ if (entry->buf_offset) {
+ switch (entry->type) {
+ case LOG_ERROR:
+ dm_error("%s", entry->buf);
+ break;
+ default:
+ dm_output_to_console("%s", entry->buf);
+ break;
+ }
+ }
+}
+
+/* Print everything in log_buffer that has not yet been read to the debug
+ * console */
+void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn)
+{
+ char *string_start = &logger->log_buffer[logger->buffer_read_offset];
+
+ if (should_warn)
+ dm_output_to_console(
+ "---------------- FLUSHING LOG BUFFER ----------------\n");
+ while (logger->buffer_read_offset < logger->buffer_write_offset) {
+
+ if (logger->log_buffer[logger->buffer_read_offset] == '\0') {
+ dm_output_to_console("%s", string_start);
+ string_start = logger->log_buffer + logger->buffer_read_offset + 1;
+ }
+ logger->buffer_read_offset++;
+ }
+ if (should_warn)
+ dm_output_to_console(
+ "-------------- END FLUSHING LOG BUFFER --------------\n\n");
+}
+
+static void log_to_internal_buffer(struct log_entry *entry)
+{
+
+ uint32_t size = entry->buf_offset;
+ struct dal_logger *logger = entry->logger;
+
+ if (logger->flags.bits.ENABLE_BUFFER == 0)
+ return;
+
+ if (logger->log_buffer == NULL)
+ return;
+
+ if (size > 0 && size < logger->log_buffer_size) {
+
+ int buffer_space = logger->log_buffer_size -
+ logger->buffer_write_offset;
+
+ if (logger->buffer_write_offset == logger->buffer_read_offset) {
+ /* Buffer is empty, start writing at beginning */
+ buffer_space = logger->log_buffer_size;
+ logger->buffer_write_offset = 0;
+ logger->buffer_read_offset = 0;
+ }
+
+ if (buffer_space > size) {
+ /* No wrap around, copy 'size' bytes
+ * from 'entry->buf' to 'log_buffer'
+ */
+ memmove(logger->log_buffer +
+ logger->buffer_write_offset,
+ entry->buf, size);
+ logger->buffer_write_offset += size;
+
+ } else {
+			/* Not enough room remaining; flush the
+			 * existing logs first */
+
+ /* Flush existing unread logs to console */
+ dm_logger_flush_buffer(logger, true);
+
+ /* Start writing to beginning of buffer */
+ memmove(logger->log_buffer, entry->buf, size);
+ logger->buffer_write_offset = size;
+ logger->buffer_read_offset = 0;
+ }
+
+ }
+}
+
+static void log_heading(struct log_entry *entry)
+{
+ int j;
+
+ for (j = 0; j < NUM_ELEMENTS(log_type_info_tbl); j++) {
+
+ const struct dc_log_type_info *info = &log_type_info_tbl[j];
+
+ if (info->type == entry->type)
+ dm_logger_append(entry, "[%s]\t", info->name);
+ }
+}
+
+static void append_entry(
+ struct log_entry *entry,
+ char *buffer,
+ uint32_t buf_size)
+{
+ if (!entry->buf ||
+ entry->buf_offset + buf_size > entry->max_buf_bytes
+ ) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* Todo: check if off by 1 byte due to \0 anywhere */
+ memmove(entry->buf + entry->buf_offset, buffer, buf_size);
+ entry->buf_offset += buf_size;
+}
+
+/* ------------------------------------------------------------------------ */
+
+/* Warning: 'msg' must be null terminated and its total size, including the
+ * terminating '\0', must be less than LOG_MAX_LINE_SIZE
+ */
+void dm_logger_write(
+ struct dal_logger *logger,
+ enum dc_log_type log_type,
+ const char *msg,
+ ...)
+{
+ if (logger && dal_logger_should_log(logger, log_type)) {
+ uint32_t size;
+ va_list args;
+ char buffer[LOG_MAX_LINE_SIZE];
+ struct log_entry entry;
+
+ va_start(args, msg);
+
+ entry.logger = logger;
+
+ entry.buf = buffer;
+
+ entry.buf_offset = 0;
+		entry.max_buf_bytes = LOG_MAX_LINE_SIZE * sizeof(char);
+
+ entry.type = log_type;
+
+ log_heading(&entry);
+
+ size = dm_log_to_buffer(
+ buffer, LOG_MAX_LINE_SIZE - 1, msg, args);
+
+ buffer[entry.buf_offset + size] = '\0';
+ entry.buf_offset += size + 1;
+
+ /* --Flush log_entry buffer-- */
+ /* print to kernel console */
+ log_to_debug_console(&entry);
+ /* log internally for dsat */
+ log_to_internal_buffer(&entry);
+
+ va_end(args);
+ }
+}
+
+/* Same as dm_logger_write, except without open() and close(), which must
+ * be done separately.
+ */
+void dm_logger_append(
+ struct log_entry *entry,
+ const char *msg,
+ ...)
+{
+ struct dal_logger *logger;
+
+ if (!entry) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ logger = entry->logger;
+
+ if (logger && logger->open_count > 0 &&
+ dal_logger_should_log(logger, entry->type)) {
+
+ uint32_t size;
+ va_list args;
+ char buffer[LOG_MAX_LINE_SIZE];
+
+ va_start(args, msg);
+
+ size = dm_log_to_buffer(
+ buffer, LOG_MAX_LINE_SIZE, msg, args);
+
+ if (size < LOG_MAX_LINE_SIZE - 1) {
+ append_entry(entry, buffer, size);
+ } else {
+			append_entry(entry, "LOG_ERROR, line too long\n", 26);
+ }
+
+ va_end(args);
+ }
+}
+
+void dm_logger_open(
+ struct dal_logger *logger,
+ struct log_entry *entry, /* out */
+ enum dc_log_type log_type)
+{
+ if (!entry) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ entry->type = log_type;
+ entry->logger = logger;
+
+ entry->buf = kzalloc(DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char),
+ GFP_KERNEL);
+
+ entry->buf_offset = 0;
+ entry->max_buf_bytes = DAL_LOGGER_BUFFER_MAX_SIZE * sizeof(char);
+
+ logger->open_count++;
+
+ log_heading(entry);
+}
+
+void dm_logger_close(struct log_entry *entry)
+{
+ struct dal_logger *logger = entry->logger;
+
+ if (logger && logger->open_count > 0) {
+ logger->open_count--;
+ } else {
+ BREAK_TO_DEBUGGER();
+ goto cleanup;
+ }
+
+ /* --Flush log_entry buffer-- */
+ /* print to kernel console */
+ log_to_debug_console(entry);
+ /* log internally for dsat */
+ log_to_internal_buffer(entry);
+
+ /* TODO: Write end heading */
+
+cleanup:
+ if (entry->buf) {
+ kfree(entry->buf);
+ entry->buf = NULL;
+ entry->buf_offset = 0;
+ entry->max_buf_bytes = 0;
+ }
+}
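+
+/*
+ * Typical multi-part usage of the entry API above (sketch only; the
+ * variables are hypothetical and error handling is omitted):
+ *
+ *	struct log_entry entry;
+ *
+ *	dm_logger_open(logger, &entry, LOG_HW_HOTPLUG);
+ *	dm_logger_append(&entry, "connector %d: ", connector_index);
+ *	dm_logger_append(&entry, "hpd %s\n", plugged ? "high" : "low");
+ *	dm_logger_close(&entry);
+ *
+ * Single-line messages can use dm_logger_write(), which builds and flushes
+ * a temporary entry internally.
+ */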
diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.h b/drivers/gpu/drm/amd/display/dc/basics/logger.h
new file mode 100644
index 000000000000..09722f0f8aa3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/logger.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_H__
+#define __DAL_LOGGER_H__
+
+
+#endif /* __DAL_LOGGER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
new file mode 100644
index 000000000000..217b8f1f7bf6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/vector.h"
+
+bool dal_vector_construct(
+ struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size)
+{
+ vector->container = NULL;
+
+ if (!struct_size || !capacity) {
+		/* Container must be non-zero size */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ vector->container = kzalloc(struct_size * capacity, GFP_KERNEL);
+ if (vector->container == NULL)
+ return false;
+ vector->capacity = capacity;
+ vector->struct_size = struct_size;
+ vector->count = 0;
+ vector->ctx = ctx;
+ return true;
+}
+
+bool dal_vector_presized_costruct(
+ struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t count,
+ void *initial_value,
+ uint32_t struct_size)
+{
+ uint32_t i;
+
+ vector->container = NULL;
+
+ if (!struct_size || !count) {
+		/* Container must be non-zero size */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ vector->container = kzalloc(struct_size * count, GFP_KERNEL);
+
+ if (vector->container == NULL)
+ return false;
+
+	/* If the caller didn't supply an initial value then the default
+	 * of all zeros is expected, which is exactly what kzalloc()
+	 * initialises the memory to. */
+	if (initial_value) {
+ for (i = 0; i < count; ++i)
+ memmove(
+ vector->container + i * struct_size,
+ initial_value,
+ struct_size);
+ }
+
+ vector->capacity = count;
+ vector->struct_size = struct_size;
+ vector->count = count;
+ return true;
+}
+
+struct vector *dal_vector_presized_create(
+ struct dc_context *ctx,
+ uint32_t size,
+ void *initial_value,
+ uint32_t struct_size)
+{
+ struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL);
+
+ if (vector == NULL)
+ return NULL;
+
+ if (dal_vector_presized_costruct(
+ vector, ctx, size, initial_value, struct_size))
+ return vector;
+
+ BREAK_TO_DEBUGGER();
+ kfree(vector);
+ return NULL;
+}
+
+struct vector *dal_vector_create(
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size)
+{
+ struct vector *vector = kzalloc(sizeof(struct vector), GFP_KERNEL);
+
+ if (vector == NULL)
+ return NULL;
+
+ if (dal_vector_construct(vector, ctx, capacity, struct_size))
+ return vector;
+
+ BREAK_TO_DEBUGGER();
+ kfree(vector);
+ return NULL;
+}
+
+void dal_vector_destruct(
+ struct vector *vector)
+{
+ kfree(vector->container);
+ vector->count = 0;
+ vector->capacity = 0;
+}
+
+void dal_vector_destroy(
+ struct vector **vector)
+{
+ if (vector == NULL || *vector == NULL)
+ return;
+ dal_vector_destruct(*vector);
+ kfree(*vector);
+ *vector = NULL;
+}
+
+uint32_t dal_vector_get_count(
+ const struct vector *vector)
+{
+ return vector->count;
+}
+
+void *dal_vector_at_index(
+ const struct vector *vector,
+ uint32_t index)
+{
+ if (vector->container == NULL || index >= vector->count)
+ return NULL;
+ return vector->container + (index * vector->struct_size);
+}
+
+bool dal_vector_remove_at_index(
+ struct vector *vector,
+ uint32_t index)
+{
+ if (index >= vector->count)
+ return false;
+
+ if (index != vector->count - 1)
+ memmove(
+ vector->container + (index * vector->struct_size),
+ vector->container + ((index + 1) * vector->struct_size),
+ (vector->count - index - 1) * vector->struct_size);
+ vector->count -= 1;
+
+ return true;
+}
+
+void dal_vector_set_at_index(
+ const struct vector *vector,
+ const void *what,
+ uint32_t index)
+{
+ void *where = dal_vector_at_index(vector, index);
+
+ if (!where) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ memmove(
+ where,
+ what,
+ vector->struct_size);
+}
+
+static inline uint32_t calc_increased_capacity(
+ uint32_t old_capacity)
+{
+ return old_capacity * 2;
+}
+
+bool dal_vector_insert_at(
+ struct vector *vector,
+ const void *what,
+ uint32_t position)
+{
+ uint8_t *insert_address;
+
+ if (vector->count == vector->capacity) {
+ if (!dal_vector_reserve(
+ vector,
+ calc_increased_capacity(vector->capacity)))
+ return false;
+ }
+
+ insert_address = vector->container + (vector->struct_size * position);
+
+ if (vector->count && position < vector->count)
+ memmove(
+ insert_address + vector->struct_size,
+ insert_address,
+ vector->struct_size * (vector->count - position));
+
+ memmove(
+ insert_address,
+ what,
+ vector->struct_size);
+
+ vector->count++;
+
+ return true;
+}
+
+bool dal_vector_append(
+ struct vector *vector,
+ const void *item)
+{
+ return dal_vector_insert_at(vector, item, vector->count);
+}
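+
+/*
+ * Usage sketch (element type and variables are hypothetical):
+ *
+ *	struct vector *v = dal_vector_create(ctx, 4, sizeof(struct foo));
+ *
+ *	if (v && dal_vector_append(v, &item))
+ *		first = dal_vector_at_index(v, 0);
+ *	dal_vector_destroy(&v);
+ *
+ * When count reaches capacity, dal_vector_insert_at() grows the container
+ * by doubling it (see calc_increased_capacity() above).
+ */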
+
+struct vector *dal_vector_clone(
+ const struct vector *vector)
+{
+ struct vector *vec_cloned;
+ uint32_t count;
+
+ /* create new vector */
+ count = dal_vector_get_count(vector);
+
+ if (count == 0)
+		/* when count is 0 we still want to create a clone of the
+		 * vector */
+ vec_cloned = dal_vector_create(
+ vector->ctx,
+ vector->capacity,
+ vector->struct_size);
+ else
+		/* Call the "presized create" version, regardless of how the
+		 * original vector was created.
+		 * The owner of the original vector must know how to treat the
+		 * new vector - as "presized" or as "regular" - but from the
+		 * vector's point of view it doesn't matter. */
+ vec_cloned = dal_vector_presized_create(vector->ctx, count,
+ NULL,/* no initial value */
+ vector->struct_size);
+
+	if (!vec_cloned) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ /* copy vector's data */
+ memmove(vec_cloned->container, vector->container,
+ vec_cloned->struct_size * vec_cloned->capacity);
+
+ return vec_cloned;
+}
+
+uint32_t dal_vector_capacity(const struct vector *vector)
+{
+ return vector->capacity;
+}
+
+bool dal_vector_reserve(struct vector *vector, uint32_t capacity)
+{
+ void *new_container;
+
+ if (capacity <= vector->capacity)
+ return true;
+
+ new_container = krealloc(vector->container,
+ capacity * vector->struct_size, GFP_KERNEL);
+
+ if (new_container) {
+ vector->container = new_container;
+ vector->capacity = capacity;
+ return true;
+ }
+
+ return false;
+}
+
+void dal_vector_clear(struct vector *vector)
+{
+ vector->count = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/Makefile b/drivers/gpu/drm/amd/display/dc/bios/Makefile
new file mode 100644
index 000000000000..6ec815dce9cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for the 'bios' sub-component of DAL.
+# It provides the parsing and execution controls for the ATOM BIOS image.
+
+BIOS = bios_parser.o bios_parser_interface.o bios_parser_helper.o command_table.o command_table_helper.o bios_parser_common.o
+
+BIOS += command_table2.o command_table_helper2.o bios_parser2.o
+
+AMD_DAL_BIOS = $(addprefix $(AMDDALPATH)/dc/bios/,$(BIOS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BIOS)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+# All DCE8.x are derived from DCE8.0, so 8.0 MUST be defined if ANY of
+# DCE8.x is compiled.
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce80/command_table_helper_dce80.o
+
+###############################################################################
+# DCE 11x
+###############################################################################
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce110/command_table_helper_dce110.o
+
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper_dce112.o
+
+AMD_DISPLAY_FILES += $(AMDDALPATH)/dc/bios/dce112/command_table_helper2_dce112.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
new file mode 100644
index 000000000000..86e6438c5cf3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -0,0 +1,3871 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "dc_bios_types.h"
+#include "include/gpio_service_interface.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/bios_parser_interface.h"
+#include "include/i2caux_interface.h"
+#include "include/logger_interface.h"
+
+#include "command_table.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "bios_parser.h"
+#include "bios_parser_types_internal.h"
+#include "bios_parser_interface.h"
+
+#include "bios_parser_common.h"
+/* TODO remove - only needed for default i2c speed */
+#include "dc.h"
+
+#define THREE_PERCENT_OF_10000 300
+
+#define LAST_RECORD_TYPE 0xff
+
+/* GUID to validate external display connection info table (aka OPM module) */
+static const uint8_t ext_display_connection_guid[NUMBER_OF_UCHAR_FOR_GUID] = {
+ 0x91, 0x6E, 0x57, 0x09,
+ 0x3F, 0x6D, 0xD2, 0x11,
+ 0x39, 0x8E, 0x00, 0xA0,
+ 0xC9, 0x69, 0x72, 0x3B};
+
+#define DATA_TABLES(table) (bp->master_data_tbl->ListOfDataTables.table)
+
+static void get_atom_data_table_revision(
+ ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+ struct atom_data_revision *tbl_revision);
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+ ATOM_OBJECT *object);
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+ uint16_t **id_list);
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+ ATOM_OBJECT *object, uint16_t **id_list);
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+ struct graphics_object_id id);
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+ ATOM_I2C_RECORD *record,
+ struct graphics_object_i2c_info *info);
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+ ATOM_OBJECT *object);
+static struct device_id device_type_from_device_id(uint16_t device_id);
+static uint32_t signal_to_ss_id(enum as_signal_type signal);
+static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object);
+
+#define BIOS_IMAGE_SIZE_OFFSET 2
+#define BIOS_IMAGE_SIZE_UNIT 512
+
+/*****************************************************************************/
+static bool bios_parser_construct(
+ struct bios_parser *bp,
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+static uint8_t bios_parser_get_connectors_number(
+ struct dc_bios *dcb);
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+ struct dc_bios *dcb,
+ struct embedded_panel_info *info);
+
+/*****************************************************************************/
+
+struct dc_bios *bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ struct bios_parser *bp = NULL;
+
+ bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
+ if (!bp)
+ return NULL;
+
+ if (bios_parser_construct(bp, init, dce_version))
+ return &bp->base;
+
+ kfree(bp);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static void destruct(struct bios_parser *bp)
+{
+ kfree(bp->base.bios_local_image);
+ kfree(bp->base.integrated_info);
+}
+
+static void bios_parser_destroy(struct dc_bios **dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(*dcb);
+
+ if (!bp) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ destruct(bp);
+
+ kfree(bp);
+ *dcb = NULL;
+}
+
+static uint8_t get_number_of_objects(struct bios_parser *bp, uint32_t offset)
+{
+ ATOM_OBJECT_TABLE *table;
+
+ uint32_t object_table_offset = bp->object_info_tbl_offset + offset;
+
+ table = GET_IMAGE(ATOM_OBJECT_TABLE, object_table_offset);
+
+ if (!table)
+ return 0;
+ else
+ return table->ucNumberOfObjects;
+}
+
+static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ return get_number_of_objects(bp,
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset));
+}
+
+static struct graphics_object_id bios_parser_get_encoder_id(
+ struct dc_bios *dcb,
+ uint32_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+ uint32_t encoder_table_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+
+ ATOM_OBJECT_TABLE *tbl =
+ GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+
+ if (tbl && tbl->ucNumberOfObjects > i) {
+ const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+ object_id = object_id_from_bios_object_id(id);
+ }
+
+ return object_id;
+}
+
+static struct graphics_object_id bios_parser_get_connector_id(
+ struct dc_bios *dcb,
+ uint8_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+ uint32_t connector_table_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+ ATOM_OBJECT_TABLE *tbl =
+ GET_IMAGE(ATOM_OBJECT_TABLE, connector_table_offset);
+
+ if (tbl && tbl->ucNumberOfObjects > i) {
+ const uint16_t id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+
+ object_id = object_id_from_bios_object_id(id);
+ }
+
+ return object_id;
+}
+
+static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+ struct graphics_object_id id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object = get_bios_object(bp, id);
+
+ return get_dst_number_from_object(bp, object);
+}
+
+static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id)
+{
+ uint32_t number;
+ uint16_t *id;
+ ATOM_OBJECT *object;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!src_object_id)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ number = get_src_obj_list(bp, object, &id);
+
+ if (number <= index)
+ return BP_RESULT_BADINPUT;
+
+ *src_object_id = object_id_from_bios_object_id(id[index]);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *dest_object_id)
+{
+ uint32_t number;
+ uint16_t *id = NULL;
+ ATOM_OBJECT *object;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!dest_object_id)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ number = get_dest_obj_list(bp, object, &id);
+
+ if (number <= index || !id)
+ return BP_RESULT_BADINPUT;
+
+ *dest_object_id = object_id_from_bios_object_id(id[index]);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info)
+{
+ uint32_t offset;
+ ATOM_OBJECT *object;
+ ATOM_COMMON_RECORD_HEADER *header;
+ ATOM_I2C_RECORD *record;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+ && sizeof(ATOM_I2C_RECORD) <= header->ucRecordSize) {
+ /* get the I2C info */
+ record = (ATOM_I2C_RECORD *) header;
+
+ if (get_gpio_i2c_info(bp, record, info) == BP_RESULT_OK)
+ return BP_RESULT_OK;
+ }
+
+ offset += header->ucRecordSize;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result get_voltage_ddc_info_v1(uint8_t *i2c_line,
+ ATOM_COMMON_TABLE_HEADER *header,
+ uint8_t *address)
+{
+ enum bp_result result = BP_RESULT_NORECORD;
+ ATOM_VOLTAGE_OBJECT_INFO *info =
+ (ATOM_VOLTAGE_OBJECT_INFO *) address;
+
+ uint8_t *voltage_current_object = (uint8_t *) &info->asVoltageObj[0];
+
+ while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+ ATOM_VOLTAGE_OBJECT *object =
+ (ATOM_VOLTAGE_OBJECT *) voltage_current_object;
+
+ if ((object->ucVoltageType == SET_VOLTAGE_INIT_MODE) &&
+ (object->ucVoltageType &
+ VOLTAGE_CONTROLLED_BY_I2C_MASK)) {
+
+ *i2c_line = object->asControl.ucVoltageControlI2cLine
+ ^ 0x90;
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ voltage_current_object += object->ucSize;
+ }
+ return result;
+}
+
+static enum bp_result get_voltage_ddc_info_v3(uint8_t *i2c_line,
+ uint32_t index,
+ ATOM_COMMON_TABLE_HEADER *header,
+ uint8_t *address)
+{
+ enum bp_result result = BP_RESULT_NORECORD;
+ ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info =
+ (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *) address;
+
+ uint8_t *voltage_current_object =
+ (uint8_t *) (&(info->asVoltageObj[0]));
+
+ while ((address + le16_to_cpu(header->usStructureSize)) > voltage_current_object) {
+ ATOM_I2C_VOLTAGE_OBJECT_V3 *object =
+ (ATOM_I2C_VOLTAGE_OBJECT_V3 *) voltage_current_object;
+
+ if (object->sHeader.ucVoltageMode ==
+ ATOM_INIT_VOLTAGE_REGULATOR) {
+ if (object->sHeader.ucVoltageType == index) {
+ *i2c_line = object->ucVoltageControlI2cLine
+ ^ 0x90;
+ result = BP_RESULT_OK;
+ break;
+ }
+ }
+
+ voltage_current_object += le16_to_cpu(object->sHeader.usSize);
+ }
+ return result;
+}
+
+static enum bp_result bios_parser_get_thermal_ddc_info(
+ struct dc_bios *dcb,
+ uint32_t i2c_channel_id,
+ struct graphics_object_i2c_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_I2C_ID_CONFIG_ACCESS *config;
+ ATOM_I2C_RECORD record;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ config = (ATOM_I2C_ID_CONFIG_ACCESS *) &i2c_channel_id;
+
+ record.sucI2cId.bfHW_Capable = config->sbfAccess.bfHW_Capable;
+ record.sucI2cId.bfI2C_LineMux = config->sbfAccess.bfI2C_LineMux;
+ record.sucI2cId.bfHW_EngineID = config->sbfAccess.bfHW_EngineID;
+
+ return get_gpio_i2c_info(bp, &record, info);
+}
+
+static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+ uint32_t index,
+ struct graphics_object_i2c_info *info)
+{
+ uint8_t i2c_line = 0;
+ enum bp_result result = BP_RESULT_NORECORD;
+ uint8_t *voltage_info_address;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision = {0};
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!DATA_TABLES(VoltageObjectInfo))
+ return result;
+
+	voltage_info_address = bios_get_image(&bp->base,
+			DATA_TABLES(VoltageObjectInfo),
+			sizeof(ATOM_COMMON_TABLE_HEADER));
+
+ header = (ATOM_COMMON_TABLE_HEADER *) voltage_info_address;
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 1:
+ case 2:
+ result = get_voltage_ddc_info_v1(&i2c_line, header,
+ voltage_info_address);
+ break;
+ case 3:
+ if (revision.minor != 1)
+ break;
+ result = get_voltage_ddc_info_v3(&i2c_line, index, header,
+ voltage_info_address);
+ break;
+ }
+
+ if (result == BP_RESULT_OK)
+ result = bios_parser_get_thermal_ddc_info(dcb,
+ i2c_line, info);
+
+ return result;
+}
+
+/* TODO: temporary commented out to suppress 'defined but not used' warning */
+#if 0
+static enum bp_result bios_parser_get_ddc_info_for_i2c_line(
+ struct bios_parser *bp,
+ uint8_t i2c_line, struct graphics_object_i2c_info *info)
+{
+ uint32_t offset;
+ ATOM_OBJECT *object;
+ ATOM_OBJECT_TABLE *table;
+ uint32_t i;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+
+ offset += bp->object_info_tbl_offset;
+
+ table = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+
+ if (!table)
+ return BP_RESULT_BADBIOSTABLE;
+
+ for (i = 0; i < table->ucNumberOfObjects; i++) {
+ object = &table->asObjects[i];
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ ATOM_COMMON_RECORD_HEADER *header =
+ GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ offset += header->ucRecordSize;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_I2C_RECORD_TYPE == header->ucRecordType
+ && sizeof(ATOM_I2C_RECORD) <=
+ header->ucRecordSize) {
+ ATOM_I2C_RECORD *record =
+ (ATOM_I2C_RECORD *) header;
+
+ if (i2c_line != record->sucI2cId.bfI2C_LineMux)
+ continue;
+
+ /* get the I2C info */
+ if (get_gpio_i2c_info(bp, record, info) ==
+ BP_RESULT_OK)
+ return BP_RESULT_OK;
+ }
+ }
+ }
+
+ return BP_RESULT_NORECORD;
+}
+#endif
+
+static enum bp_result bios_parser_get_hpd_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object;
+ ATOM_HPD_INT_RECORD *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_hpd_record(bp, object);
+
+ if (record != NULL) {
+ info->hpd_int_gpio_uid = record->ucHPDIntGPIOID;
+ info->hpd_active = record->ucPlugged_PinState;
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_get_device_tag_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object,
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD **record)
+{
+ ATOM_COMMON_RECORD_HEADER *header;
+ uint32_t offset;
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ offset += header->ucRecordSize;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE !=
+ header->ucRecordType)
+ continue;
+
+ if (sizeof(ATOM_CONNECTOR_DEVICE_TAG) > header->ucRecordSize)
+ continue;
+
+ *record = (ATOM_CONNECTOR_DEVICE_TAG_RECORD *) header;
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result bios_parser_get_device_tag(
+ struct dc_bios *dcb,
+ struct graphics_object_id connector_object_id,
+ uint32_t device_tag_index,
+ struct connector_device_tag_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object;
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD *record = NULL;
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+	/* get_bios_object() will return the MXM object */
+ object = get_bios_object(bp, connector_object_id);
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (bios_parser_get_device_tag_record(bp, object, &record)
+ != BP_RESULT_OK)
+ return BP_RESULT_NORECORD;
+
+ if (device_tag_index >= record->ucNumberOfDevice)
+ return BP_RESULT_NORECORD;
+
+ device_tag = &record->asDeviceTag[device_tag_index];
+
+ info->acpi_device = le32_to_cpu(device_tag->ulACPIDeviceEnum);
+ info->dev_id =
+ device_type_from_device_id(le16_to_cpu(device_tag->usDeviceID));
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_firmware_info_v1_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v2_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v2_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
+static enum bp_result bios_parser_get_firmware_info(
+ struct dc_bios *dcb,
+ struct dc_firmware_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision;
+
+ if (info && DATA_TABLES(FirmwareInfo)) {
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(FirmwareInfo));
+ get_atom_data_table_revision(header, &revision);
+ switch (revision.major) {
+ case 1:
+ switch (revision.minor) {
+ case 4:
+ result = get_firmware_info_v1_4(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case 2:
+ switch (revision.minor) {
+ case 1:
+ result = get_firmware_info_v2_1(bp, info);
+ break;
+ case 2:
+ result = get_firmware_info_v2_2(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return result;
+}
+
+static enum bp_result get_firmware_info_v1_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ ATOM_FIRMWARE_INFO_V1_4 *firmware_info =
+ GET_IMAGE(ATOM_FIRMWARE_INFO_V1_4,
+ DATA_TABLES(FirmwareInfo));
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. We need to convert from 10KHz units into
+ * KHz units */
+ info->pll_info.crystal_frequency =
+ le16_to_cpu(firmware_info->usReferenceClock) * 10;
+ info->pll_info.min_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+ info->pll_info.max_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+ info->pll_info.min_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+ info->pll_info.max_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+
+ if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+ /* Since there is no information on the SS, report conservative
+ * value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+ if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+		/* Since there is no information on the SS, report conservative
+		 * value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v3_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info);
+
+static enum bp_result get_firmware_info_v2_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ ATOM_FIRMWARE_INFO_V2_1 *firmwareInfo =
+ GET_IMAGE(ATOM_FIRMWARE_INFO_V2_1, DATA_TABLES(FirmwareInfo));
+ struct spread_spectrum_info internalSS;
+ uint32_t index;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!firmwareInfo)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. We need to convert from 10KHz units into
+ * KHz units */
+ info->pll_info.crystal_frequency =
+ le16_to_cpu(firmwareInfo->usCoreReferenceClock) * 10;
+ info->pll_info.min_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmwareInfo->usMinPixelClockPLL_Input) * 10;
+ info->pll_info.max_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmwareInfo->usMaxPixelClockPLL_Input) * 10;
+ info->pll_info.min_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmwareInfo->ulMinPixelClockPLL_Output) * 10;
+ info->pll_info.max_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmwareInfo->ulMaxPixelClockPLL_Output) * 10;
+ info->default_display_engine_pll_frequency =
+ le32_to_cpu(firmwareInfo->ulDefaultDispEngineClkFreq) * 10;
+ info->external_clock_source_frequency_for_dp =
+ le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
+ info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+
+ /* There should be only one entry in the SS info table for Memory Clock
+ */
+ index = 0;
+ if (firmwareInfo->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_MEMORY_SS, index, &internalSS) == BP_RESULT_OK) {
+ if (internalSS.spread_spectrum_percentage) {
+ info->feature.memory_clk_ss_percentage =
+ internalSS.spread_spectrum_percentage;
+ if (internalSS.type.CENTER_MODE) {
+ /* if it is center mode, the exact SS percentage
+ * will be the rounded-up half of the percentage
+ * reported in the SS table */
+ ++info->feature.memory_clk_ss_percentage;
+ info->feature.memory_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ /* There should be only one entry in the SS info table for Engine Clock
+ */
+ index = 1;
+ if (firmwareInfo->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_ENGINE_SS, index, &internalSS) == BP_RESULT_OK) {
+ if (internalSS.spread_spectrum_percentage) {
+ info->feature.engine_clk_ss_percentage =
+ internalSS.spread_spectrum_percentage;
+ if (internalSS.type.CENTER_MODE) {
+ /* if it is center mode, the exact SS percentage
+ * will be the rounded-up half of the percentage
+ * reported in the SS table */
+ ++info->feature.engine_clk_ss_percentage;
+ info->feature.engine_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ return BP_RESULT_OK;
+}
+
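+/*
+ * get_firmware_info_v2_2
+ * Same as v2_1, but parsed from the ATOM_FIRMWARE_INFO_V2_2 table; also
+ * reports the remote display configuration and the SMU GPU PLL output
+ * frequency.
+ */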
+static enum bp_result get_firmware_info_v2_2(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+ struct spread_spectrum_info internal_ss;
+ uint32_t index;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(ATOM_FIRMWARE_INFO_V2_2,
+ DATA_TABLES(FirmwareInfo));
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. We need to convert from 10KHz units into
+ * KHz units */
+ info->pll_info.crystal_frequency =
+ le16_to_cpu(firmware_info->usCoreReferenceClock) * 10;
+ info->pll_info.min_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMinPixelClockPLL_Input) * 10;
+ info->pll_info.max_input_pxl_clk_pll_frequency =
+ le16_to_cpu(firmware_info->usMaxPixelClockPLL_Input) * 10;
+ info->pll_info.min_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
+ info->pll_info.max_output_pxl_clk_pll_frequency =
+ le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+ info->default_display_engine_pll_frequency =
+ le32_to_cpu(firmware_info->ulDefaultDispEngineClkFreq) * 10;
+ info->external_clock_source_frequency_for_dp =
+ le16_to_cpu(firmware_info->usUniphyDPModeExtClkFreq) * 10;
+
+ /* There should be only one entry in the SS info table for Memory Clock
+ */
+ index = 0;
+ if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.memory_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_MEMORY_SS, index, &internal_ss) == BP_RESULT_OK) {
+ if (internal_ss.spread_spectrum_percentage) {
+ info->feature.memory_clk_ss_percentage =
+ internal_ss.spread_spectrum_percentage;
+ if (internal_ss.type.CENTER_MODE) {
+ /* if it is center mode, the exact SS percentage
+ * will be the rounded-up half of the percentage
+ * reported in the SS table */
+ ++info->feature.memory_clk_ss_percentage;
+ info->feature.memory_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ /* There should be only one entry in the SS info table for Engine Clock
+ */
+ index = 1;
+ if (firmware_info->usFirmwareCapability.sbfAccess.EngineClockSS_Support)
+ /* Since there is no information for external SS, report
+ * conservative value 3% for bandwidth calculation */
+ /* unit of 0.01% */
+ info->feature.engine_clk_ss_percentage = THREE_PERCENT_OF_10000;
+ else if (get_ss_info_v3_1(bp,
+ ASIC_INTERNAL_ENGINE_SS, index, &internal_ss) == BP_RESULT_OK) {
+ if (internal_ss.spread_spectrum_percentage) {
+ info->feature.engine_clk_ss_percentage =
+ internal_ss.spread_spectrum_percentage;
+ if (internal_ss.type.CENTER_MODE) {
+ /* if it is center mode, the exact SS percentage
+ * will be the rounded-up half of the percentage
+ * reported in the SS table */
+ ++info->feature.engine_clk_ss_percentage;
+ info->feature.engine_clk_ss_percentage /= 2;
+ }
+ }
+ }
+
+ /* Remote Display */
+ info->remote_display_config = firmware_info->ucRemoteDisplayConfig;
+
+ /* Minimum allowed BL level */
+ info->min_allowed_bl_level = firmware_info->ucMinAllowedBL_Level;
+ /* Used starting from CI */
+ info->smu_gpu_pll_output_freq =
+ (uint32_t) (le32_to_cpu(firmware_info->ulGPUPLL_OutputFreq) * 10);
+
+ return BP_RESULT_OK;
+}
+
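+/*
+ * get_ss_info_v3_1
+ * Walk the ASIC_InternalSS_Info V3 assignments and return the index-th entry
+ * whose clock indication matches the requested id.
+ */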
+static enum bp_result get_ss_info_v3_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ ATOM_ASIC_INTERNAL_SS_INFO_V3 *ss_table_header_include;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+ uint32_t table_size;
+ uint32_t i;
+ uint32_t table_index = 0;
+
+ if (!ss_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return BP_RESULT_UNSUPPORTED;
+
+ ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ table_size =
+ (le16_to_cpu(ss_table_header_include->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+ &ss_table_header_include->asSpreadSpectrum[0];
+
+ memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+ for (i = 0; i < table_size; i++) {
+ if (tbl[i].ucClockIndication != (uint8_t) id)
+ continue;
+
+ if (table_index != index) {
+ table_index++;
+ continue;
+ }
+ /* VBIOS Version 3 introduced new defines with the same values as
+ * before, so now use these new ones for Version 3.
+ * This shouldn't affect V3 VBIOS images already in the field, as the
+ * define values are still the same.
+ * #define SS_MODE_V3_CENTRE_SPREAD_MASK 0x01
+ * #define SS_MODE_V3_EXTERNAL_SS_MASK 0x02
+ *
+ * Old VBIOS defines:
+ * #define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+ * #define ATOM_EXTERNAL_SS_MASK 0x00000002
+ */
+
+ if (SS_MODE_V3_EXTERNAL_SS_MASK & tbl[i].ucSpreadSpectrumMode)
+ ss_info->type.EXTERNAL = true;
+
+ if (SS_MODE_V3_CENTRE_SPREAD_MASK & tbl[i].ucSpreadSpectrumMode)
+ ss_info->type.CENTER_MODE = true;
+
+ /* Older VBIOS (in the field) always provides the SS percentage in
+ * 0.01% units, so set the divider to 100 */
+ ss_info->spread_percentage_divider = 100;
+
+ /* #define SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK 0x10 */
+ if (SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK
+ & tbl[i].ucSpreadSpectrumMode)
+ ss_info->spread_percentage_divider = 1000;
+
+ ss_info->type.STEP_AND_DELAY_INFO = false;
+ /* convert [10KHz] into [KHz] */
+ ss_info->target_clock_range =
+ le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+ ss_info->spread_spectrum_percentage =
+ (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+ ss_info->spread_spectrum_range =
+ (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+
+ return BP_RESULT_OK;
+ }
+ return BP_RESULT_NORECORD;
+}
+
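+/*
+ * Command-table dispatch helpers: each of these checks that the corresponding
+ * bp->cmd_tbl function pointer is populated and forwards the call to it,
+ * returning BP_RESULT_FAILURE otherwise.
+ */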
+static enum bp_result bios_parser_transmitter_control(
+ struct dc_bios *dcb,
+ struct bp_transmitter_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.transmitter_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.transmitter_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_encoder_control(
+ struct dc_bios *dcb,
+ struct bp_encoder_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.dig_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dig_encoder_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_adjust_pixel_clock(
+ struct dc_bios *dcb,
+ struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.adjust_display_pll)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.adjust_display_pll(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_pixel_clock(
+ struct dc_bios *dcb,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_pixel_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_dce_clock(
+ struct dc_bios *dcb,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_dce_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_spread_spectrum_on_ppll(
+ struct dc_bios *dcb,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_spread_spectrum_on_ppll)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_spread_spectrum_on_ppll(
+ bp, bp_params, enable);
+}
+
+static enum bp_result bios_parser_program_crtc_timing(
+ struct dc_bios *dcb,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_crtc_timing)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
+}
+
+static enum bp_result bios_parser_program_display_engine_pll(
+ struct dc_bios *dcb,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.program_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.program_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_crtc(
+ struct dc_bios *dcb,
+ enum controller_id id,
+ bool enable)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_crtc)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_crtc(bp, id, enable);
+}
+
+static enum bp_result bios_parser_crtc_source_select(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_disp_power_gating(
+ struct dc_bios *dcb,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_disp_power_gating)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
+ action);
+}
+
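+/*
+ * bios_parser_is_device_id_supported
+ * Check the object info table's device support mask for the given device id.
+ */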
+static bool bios_parser_is_device_id_supported(
+ struct dc_bios *dcb,
+ struct device_id id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ uint32_t mask = get_support_mask_for_device_id(id);
+
+ return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
+}
+
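+/*
+ * bios_parser_crt_control
+ * Enable or disable a DAC encoder and its output. On enable the encoder is
+ * programmed before the output; on disable the output is turned off first.
+ */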
+static enum bp_result bios_parser_crt_control(
+ struct dc_bios *dcb,
+ enum engine_id engine_id,
+ bool enable,
+ uint32_t pixel_clock)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ uint8_t standard;
+
+ if (!bp->cmd_tbl.dac1_encoder_control &&
+ engine_id == ENGINE_ID_DACA)
+ return BP_RESULT_FAILURE;
+ if (!bp->cmd_tbl.dac2_encoder_control &&
+ engine_id == ENGINE_ID_DACB)
+ return BP_RESULT_FAILURE;
+ /* validate params */
+ switch (engine_id) {
+ case ENGINE_ID_DACA:
+ case ENGINE_ID_DACB:
+ break;
+ default:
+ /* unsupported engine */
+ return BP_RESULT_FAILURE;
+ }
+
+ standard = ATOM_DAC1_PS2; /* == ATOM_DAC2_PS2 */
+
+ if (enable) {
+ if (engine_id == ENGINE_ID_DACA) {
+ bp->cmd_tbl.dac1_encoder_control(bp, enable,
+ pixel_clock, standard);
+ if (bp->cmd_tbl.dac1_output_control != NULL)
+ bp->cmd_tbl.dac1_output_control(bp, enable);
+ } else {
+ bp->cmd_tbl.dac2_encoder_control(bp, enable,
+ pixel_clock, standard);
+ if (bp->cmd_tbl.dac2_output_control != NULL)
+ bp->cmd_tbl.dac2_output_control(bp, enable);
+ }
+ } else {
+ if (engine_id == ENGINE_ID_DACA) {
+ if (bp->cmd_tbl.dac1_output_control != NULL)
+ bp->cmd_tbl.dac1_output_control(bp, enable);
+ bp->cmd_tbl.dac1_encoder_control(bp, enable,
+ pixel_clock, standard);
+ } else {
+ if (bp->cmd_tbl.dac2_output_control != NULL)
+ bp->cmd_tbl.dac2_output_control(bp, enable);
+ bp->cmd_tbl.dac2_encoder_control(bp, enable,
+ pixel_clock, standard);
+ }
+ }
+
+ return BP_RESULT_OK;
+}
+
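+/*
+ * get_hpd_record
+ * Walk the object's record list and return its HPD interrupt record, or NULL
+ * if none is present.
+ */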
+static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ ATOM_COMMON_RECORD_HEADER *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_HPD_INT_RECORD_TYPE == header->ucRecordType
+ && sizeof(ATOM_HPD_INT_RECORD) <= header->ucRecordSize)
+ return (ATOM_HPD_INT_RECORD *) header;
+
+ offset += header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+/**
+ * Get I2C information of input object id
+ *
+ * Search all records to find the ATOM_I2C_RECORD_TYPE record.
+ */
+static ATOM_I2C_RECORD *get_i2c_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ ATOM_COMMON_RECORD_HEADER *record_header;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER();
+ /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ record_header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!record_header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == record_header->ucRecordType ||
+ 0 == record_header->ucRecordSize)
+ break;
+
+ if (ATOM_I2C_RECORD_TYPE == record_header->ucRecordType &&
+ sizeof(ATOM_I2C_RECORD) <=
+ record_header->ucRecordSize) {
+ return (ATOM_I2C_RECORD *)record_header;
+ }
+
+ offset += record_header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+static enum bp_result get_ss_info_from_ss_info_table(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info);
+static enum bp_result get_ss_info_from_tbl(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info);
+/**
+ * bios_parser_get_spread_spectrum_info
+ * Get spread spectrum information from the ASIC_InternalSS_Info (ver 2.1 or
+ * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
+ * ver 2.1 can co-exist with the SS_Info table. Except for ASIC_InternalSS_Info
+ * ver 3.1, there is only one entry for each signal/SS id. However, there is
+ * no plan to support multiple spread spectrum entries for EverGreen.
+ * @param [in] dcb, pointer to the DC BIOS object
+ * @param [in] signal, ASSignalType to be converted to an SS info index
+ * @param [in] index, entry index among entries matching the converted info index
+ * @param [out] ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result bios_parser_get_spread_spectrum_info(
+ struct dc_bios *dcb,
+ enum as_signal_type signal,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ uint32_t clk_id_ss = 0;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!ss_info) /* check for bad input */
+ return BP_RESULT_BADINPUT;
+ /* signal translation */
+ clk_id_ss = signal_to_ss_id(signal);
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ if (!index)
+ return get_ss_info_from_ss_info_table(bp, clk_id_ss,
+ ss_info);
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ get_atom_data_table_revision(header, &tbl_revision);
+
+ switch (tbl_revision.major) {
+ case 2:
+ switch (tbl_revision.minor) {
+ case 1:
+ /* there cannot be more than one entry for the Internal
+ * SS Info table version 2.1 */
+ if (!index)
+ return get_ss_info_from_tbl(bp, clk_id_ss,
+ ss_info);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ case 3:
+ switch (tbl_revision.minor) {
+ case 1:
+ return get_ss_info_v3_1(bp, clk_id_ss, index, ss_info);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ /* there cannot be more than one entry in the SS Info table */
+ return result;
+}
+
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *info);
+
+/**
+ * get_ss_info_from_tbl
+ * Get spread spectrum information from the ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info table from the VBIOS.
+ * There cannot be more than one entry for ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info.
+ *
+ * @param bp, pointer to the BIOS parser
+ * @param id, spread spectrum info index
+ * @param ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_tbl(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info)
+{
+ if (!ss_info) /* check for bad input; ss_info must not be NULL */
+ return BP_RESULT_BADINPUT;
+ /* for SS_Info table only support DP and LVDS */
+ if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+ return get_ss_info_from_ss_info_table(bp, id, ss_info);
+ else
+ return get_ss_info_from_internal_ss_info_tbl_V2_1(bp, id,
+ ss_info);
+}
+
+/**
+ * get_ss_info_from_internal_ss_info_tbl_V2_1
+ * Get spread spectrum information from the ASIC_InternalSS_Info table Ver 2.1
+ * of the VBIOS.
+ * There will not be multiple entries for Ver 2.1.
+ *
+ * @param id, spread spectrum info index
+ * @param info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *info)
+{
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ ATOM_ASIC_INTERNAL_SS_INFO_V2 *header;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+ uint32_t tbl_size, i;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return result;
+
+ header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+ DATA_TABLES(ASIC_InternalSS_Info));
+
+ memset(info, 0, sizeof(struct spread_spectrum_info));
+
+ tbl_size = (le16_to_cpu(header->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+ &(header->asSpreadSpectrum[0]);
+ for (i = 0; i < tbl_size; i++) {
+ result = BP_RESULT_NORECORD;
+
+ if (tbl[i].ucClockIndication != (uint8_t)id)
+ continue;
+
+ if (ATOM_EXTERNAL_SS_MASK
+ & tbl[i].ucSpreadSpectrumMode) {
+ info->type.EXTERNAL = true;
+ }
+ if (ATOM_SS_CENTRE_SPREAD_MODE_MASK
+ & tbl[i].ucSpreadSpectrumMode) {
+ info->type.CENTER_MODE = true;
+ }
+ info->type.STEP_AND_DELAY_INFO = false;
+ /* convert [10KHz] into [KHz] */
+ info->target_clock_range =
+ le32_to_cpu(tbl[i].ulTargetClockRange) * 10;
+ info->spread_spectrum_percentage =
+ (uint32_t)le16_to_cpu(tbl[i].usSpreadSpectrumPercentage);
+ info->spread_spectrum_range =
+ (uint32_t)(le16_to_cpu(tbl[i].usSpreadRateIn10Hz) * 10);
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ return result;
+}
+
+/**
+ * get_ss_info_from_ss_info_table
+ * Get spread spectrum information from the SS_Info table of the VBIOS.
+ * If the pointer to ss_info is NULL, the caller only wants to know the
+ * number of entries that match the id.
+ * For the SS_Info table, there should not be more than one matching entry.
+ *
+ * @param [in] id, spread spectrum id
+ * @param [out] ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result get_ss_info_from_ss_info_table(
+ struct bios_parser *bp,
+ uint32_t id,
+ struct spread_spectrum_info *ss_info)
+{
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ ATOM_SPREAD_SPECTRUM_INFO *tbl;
+ ATOM_COMMON_TABLE_HEADER *header;
+ uint32_t table_size;
+ uint32_t i;
+ uint32_t id_local = SS_ID_UNKNOWN;
+ struct atom_data_revision revision;
+
+ /* check for existence of the SS_Info table */
+ /* check for bad input; ss_info must not be NULL */
+ if (!DATA_TABLES(SS_Info) || !ss_info)
+ return result;
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(SS_Info));
+ get_atom_data_table_revision(header, &revision);
+
+ tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info));
+
+ if (1 != revision.major || 2 > revision.minor)
+ return result;
+
+ /* have to convert from Internal_SS format to SS_Info format */
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_DP:
+ id_local = SS_ID_DP1;
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS:
+ {
+ struct embedded_panel_info panel_info;
+
+ if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+ == BP_RESULT_OK)
+ id_local = panel_info.ss_id;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (id_local == SS_ID_UNKNOWN)
+ return result;
+
+ table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+ for (i = 0; i < table_size; i++) {
+ if (id_local != (uint32_t)tbl->asSS_Info[i].ucSS_Id)
+ continue;
+
+ memset(ss_info, 0, sizeof(struct spread_spectrum_info));
+
+ if (ATOM_EXTERNAL_SS_MASK &
+ tbl->asSS_Info[i].ucSpreadSpectrumType)
+ ss_info->type.EXTERNAL = true;
+
+ if (ATOM_SS_CENTRE_SPREAD_MODE_MASK &
+ tbl->asSS_Info[i].ucSpreadSpectrumType)
+ ss_info->type.CENTER_MODE = true;
+
+ ss_info->type.STEP_AND_DELAY_INFO = true;
+ ss_info->spread_spectrum_percentage =
+ (uint32_t)le16_to_cpu(tbl->asSS_Info[i].usSpreadSpectrumPercentage);
+ ss_info->step_and_delay_info.step = tbl->asSS_Info[i].ucSS_Step;
+ ss_info->step_and_delay_info.delay =
+ tbl->asSS_Info[i].ucSS_Delay;
+ ss_info->step_and_delay_info.recommended_ref_div =
+ tbl->asSS_Info[i].ucRecommendedRef_Div;
+ ss_info->spread_spectrum_range =
+ (uint32_t)tbl->asSS_Info[i].ucSS_Range * 10000;
+
+ /* there will be only one entry for each display type in SS_info
+ * table */
+ result = BP_RESULT_OK;
+ break;
+ }
+
+ return result;
+}
+
+static enum bp_result get_embedded_panel_info_v1_2(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info);
+static enum bp_result get_embedded_panel_info_v1_3(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info);
+
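+/*
+ * bios_parser_get_embedded_panel_info
+ * Dispatch to the LCD_Info parser matching the table content revision.
+ */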
+static enum bp_result bios_parser_get_embedded_panel_info(
+ struct dc_bios *dcb,
+ struct embedded_panel_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_COMMON_TABLE_HEADER *hdr;
+
+ if (!DATA_TABLES(LCD_Info))
+ return BP_RESULT_FAILURE;
+
+ hdr = GET_IMAGE(ATOM_COMMON_TABLE_HEADER, DATA_TABLES(LCD_Info));
+
+ if (!hdr)
+ return BP_RESULT_BADBIOSTABLE;
+
+ switch (hdr->ucTableFormatRevision) {
+ case 1:
+ switch (hdr->ucTableContentRevision) {
+ case 0:
+ case 1:
+ case 2:
+ return get_embedded_panel_info_v1_2(bp, info);
+ case 3:
+ return get_embedded_panel_info_v1_3(bp, info);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
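+/*
+ * get_embedded_panel_info_v1_2
+ * Fill embedded_panel_info from the ATOM_LVDS_INFO_V12 table: panel timing,
+ * misc flags, SS id, supported refresh rates and DRR capability.
+ */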
+static enum bp_result get_embedded_panel_info_v1_2(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info)
+{
+ ATOM_LVDS_INFO_V12 *lvds;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(LVDS_Info))
+ return BP_RESULT_UNSUPPORTED;
+
+ lvds =
+ GET_IMAGE(ATOM_LVDS_INFO_V12, DATA_TABLES(LVDS_Info));
+
+ if (!lvds)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (1 != lvds->sHeader.ucTableFormatRevision
+ || 2 > lvds->sHeader.ucTableContentRevision)
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units*/
+ info->lcd_timing.pixel_clk =
+ le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+ /* usHActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.horizontal_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usHActive);
+ /* usHBlanking_Time includes borders, so we should really be subtracting
+ * borders during this translation, but LVDS generally doesn't have
+ * borders, so we should be okay leaving this as is for now. May need
+ * to revisit if we ever have LVDS with borders */
+ info->lcd_timing.horizontal_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+ /* usVActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.vertical_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usVActive);
+ /* usVBlanking_Time includes borders, so we should really be subtracting
+ * borders during this translation, but LVDS generally doesn't have
+ * borders, so we should be okay leaving this as is for now. May need
+ * to revisit if we ever have LVDS with borders */
+ info->lcd_timing.vertical_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+ info->lcd_timing.horizontal_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+ info->lcd_timing.horizontal_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+ info->lcd_timing.vertical_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+ info->lcd_timing.vertical_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+ info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+ info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+ info->lcd_timing.misc_info.H_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+ info->lcd_timing.misc_info.V_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+ info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+ info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+ info->lcd_timing.misc_info.COMPOSITE_SYNC =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+ info->lcd_timing.misc_info.INTERLACE =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+ info->lcd_timing.misc_info.DOUBLE_CLOCK =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+ info->ss_id = lvds->ucSS_Id;
+
+ {
+ uint8_t rr = le16_to_cpu(lvds->usSupportedRefreshRate);
+ /* Get minimum supported refresh rate*/
+ if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+ info->supported_rr.REFRESH_RATE_30HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+ info->supported_rr.REFRESH_RATE_40HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+ info->supported_rr.REFRESH_RATE_48HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+ info->supported_rr.REFRESH_RATE_50HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+ info->supported_rr.REFRESH_RATE_60HZ = 1;
+ }
+
+ /* DRR panel support can be reported by the VBIOS */
+ if (LCDPANEL_CAP_DRR_SUPPORTED
+ & lvds->ucLCDPanel_SpecialHandlingCap)
+ info->drr_enabled = 1;
+
+ if (ATOM_PANEL_MISC_DUAL & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+ if (ATOM_PANEL_MISC_888RGB & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.RGB888 = true;
+
+ info->lcd_timing.misc_info.GREY_LEVEL =
+ (uint32_t) (ATOM_PANEL_MISC_GREY_LEVEL &
+ lvds->ucLVDS_Misc) >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT;
+
+ if (ATOM_PANEL_MISC_SPATIAL & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.SPATIAL = true;
+
+ if (ATOM_PANEL_MISC_TEMPORAL & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.TEMPORAL = true;
+
+ if (ATOM_PANEL_MISC_API_ENABLED & lvds->ucLVDS_Misc)
+ info->lcd_timing.misc_info.API_ENABLED = true;
+
+ return BP_RESULT_OK;
+}
+
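+/*
+ * get_embedded_panel_info_v1_3
+ * Same as v1_2, but parsed from the ATOM_LCD_INFO_V13 table, including the
+ * minimum refresh rate reported for DRR panels.
+ */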
+static enum bp_result get_embedded_panel_info_v1_3(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info)
+{
+ ATOM_LCD_INFO_V13 *lvds;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(LCD_Info))
+ return BP_RESULT_UNSUPPORTED;
+
+ lvds = GET_IMAGE(ATOM_LCD_INFO_V13, DATA_TABLES(LCD_Info));
+
+ if (!lvds)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (!((1 == lvds->sHeader.ucTableFormatRevision)
+ && (3 <= lvds->sHeader.ucTableContentRevision)))
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units */
+ info->lcd_timing.pixel_clk =
+ le16_to_cpu(lvds->sLCDTiming.usPixClk) * 10;
+ /* usHActive does not include borders, according to VBIOS team */
+ info->lcd_timing.horizontal_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usHActive);
+ /* usHBlanking_Time includes borders, so we should really be subtracting
+ * borders during this translation, but LVDS generally doesn't have
+ * borders, so we should be okay leaving this as is for now. May need
+ * to revisit if we ever have LVDS with borders */
+ info->lcd_timing.horizontal_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usHBlanking_Time);
+ /* usVActive does not include borders, according to VBIOS team*/
+ info->lcd_timing.vertical_addressable =
+ le16_to_cpu(lvds->sLCDTiming.usVActive);
+ /* usVBlanking_Time includes borders, so we should really be subtracting
+ * borders during this translation, but LVDS generally doesn't have
+ * borders, so we should be okay leaving this as is for now. May need
+ * to revisit if we ever have LVDS with borders */
+ info->lcd_timing.vertical_blanking_time =
+ le16_to_cpu(lvds->sLCDTiming.usVBlanking_Time);
+ info->lcd_timing.horizontal_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncOffset);
+ info->lcd_timing.horizontal_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usHSyncWidth);
+ info->lcd_timing.vertical_sync_offset =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncOffset);
+ info->lcd_timing.vertical_sync_width =
+ le16_to_cpu(lvds->sLCDTiming.usVSyncWidth);
+ info->lcd_timing.horizontal_border = lvds->sLCDTiming.ucHBorder;
+ info->lcd_timing.vertical_border = lvds->sLCDTiming.ucVBorder;
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HorizontalCutOff;
+ info->lcd_timing.misc_info.H_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.HSyncPolarity;
+ info->lcd_timing.misc_info.V_SYNC_POLARITY =
+ ~(uint32_t)
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VSyncPolarity;
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.VerticalCutOff;
+ info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.H_ReplicationBy2;
+ info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.V_ReplicationBy2;
+ info->lcd_timing.misc_info.COMPOSITE_SYNC =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.CompositeSync;
+ info->lcd_timing.misc_info.INTERLACE =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.Interlace;
+ info->lcd_timing.misc_info.DOUBLE_CLOCK =
+ lvds->sLCDTiming.susModeMiscInfo.sbfAccess.DoubleClock;
+ info->ss_id = lvds->ucSS_Id;
+
+ /* DRR panel support can be reported by the VBIOS */
+ if (LCDPANEL_CAP_V13_DRR_SUPPORTED
+ & lvds->ucLCDPanel_SpecialHandlingCap)
+ info->drr_enabled = 1;
+
+ /* Get supported refresh rate*/
+ if (info->drr_enabled == 1) {
+ uint8_t min_rr =
+ lvds->sRefreshRateSupport.ucMinRefreshRateForDRR;
+ uint8_t rr = lvds->sRefreshRateSupport.ucSupportedRefreshRate;
+
+ if (min_rr != 0) {
+ if (SUPPORTED_LCD_REFRESHRATE_30Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_30HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_40Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_40HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_48Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_48HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_50Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_50HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_60Hz & min_rr)
+ info->supported_rr.REFRESH_RATE_60HZ = 1;
+ } else {
+ if (SUPPORTED_LCD_REFRESHRATE_30Hz & rr)
+ info->supported_rr.REFRESH_RATE_30HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_40Hz & rr)
+ info->supported_rr.REFRESH_RATE_40HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_48Hz & rr)
+ info->supported_rr.REFRESH_RATE_48HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_50Hz & rr)
+ info->supported_rr.REFRESH_RATE_50HZ = 1;
+ else if (SUPPORTED_LCD_REFRESHRATE_60Hz & rr)
+ info->supported_rr.REFRESH_RATE_60HZ = 1;
+ }
+ }
+
+ if (ATOM_PANEL_MISC_V13_DUAL & lvds->ucLCD_Misc)
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = true;
+
+ if (ATOM_PANEL_MISC_V13_8BIT_PER_COLOR & lvds->ucLCD_Misc)
+ info->lcd_timing.misc_info.RGB888 = true;
+
+ info->lcd_timing.misc_info.GREY_LEVEL =
+ (uint32_t) (ATOM_PANEL_MISC_V13_GREY_LEVEL &
+ lvds->ucLCD_Misc) >> ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT;
+
+ return BP_RESULT_OK;
+}
+
+/**
+ * bios_parser_get_encoder_cap_info
+ *
+ * @brief
+ * Get encoder capability information of input object id
+ *
+ * @param object_id, Object id
+ * @param info, encoder cap information structure
+ *
+ * @return Bios parser result code
+ *
+ */
+static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_OBJECT *object;
+ ATOM_ENCODER_CAP_RECORD_V2 *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_encoder_cap_record(bp, object);
+ if (!record)
+ return BP_RESULT_NORECORD;
+
+ info->DP_HBR2_EN = record->usHBR2En;
+ info->DP_HBR3_EN = record->usHBR3En;
+ info->HDMI_6GB_EN = record->usHDMI6GEn;
+ return BP_RESULT_OK;
+}
+
+/**
+ * get_encoder_cap_record
+ *
+ * @brief
+ * Get encoder cap record for the object
+ *
+ * @param object, ATOM object
+ *
+ * @return atom encoder cap record
+ *
+ * @note
+ * search all records to find the ATOM_ENCODER_CAP_RECORD_V2 record
+ */
+static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ ATOM_COMMON_RECORD_HEADER *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ offset += header->ucRecordSize;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ !header->ucRecordSize)
+ break;
+
+ if (ATOM_ENCODER_CAP_RECORD_TYPE != header->ucRecordType)
+ continue;
+
+ if (sizeof(ATOM_ENCODER_CAP_RECORD_V2) <= header->ucRecordSize)
+ return (ATOM_ENCODER_CAP_RECORD_V2 *)header;
+ }
+
+ return NULL;
+}
+
+static uint32_t get_ss_entry_number(
+ struct bios_parser *bp,
+ uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+ struct bios_parser *bp,
+ uint32_t id);
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ struct bios_parser *bp,
+ uint32_t id);
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+ struct bios_parser *bp,
+ uint32_t id);
+
+/**
+ * bios_parser_get_ss_entry_number
+ * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
+ * table of the VBIOS that match the SS id (to be converted from signal)
+ *
+ * @param[in] signal, ASSignalType to be converted to an SS id
+ * @return number of SS entries that match the signal
+ */
+static uint32_t bios_parser_get_ss_entry_number(
+ struct dc_bios *dcb,
+ enum as_signal_type signal)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ uint32_t ss_id = 0;
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision;
+
+ ss_id = signal_to_ss_id(signal);
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return get_ss_entry_number_from_ss_info_tbl(bp, ss_id);
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 2:
+ switch (revision.minor) {
+ case 1:
+ return get_ss_entry_number(bp, ss_id);
+ default:
+ break;
+ }
+ break;
+ case 3:
+ switch (revision.minor) {
+ case 1:
+ return
+ get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ bp, ss_id);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * get_ss_entry_number_from_ss_info_tbl
+ * Get the number of spread spectrum entries from the SS_Info table of the
+ * VBIOS.
+ *
+ * @note There can only be one entry for each id in the SS_Info table
+ *
+ * @param [in] id, spread spectrum id
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_ss_info_tbl(
+ struct bios_parser *bp,
+ uint32_t id)
+{
+ ATOM_SPREAD_SPECTRUM_INFO *tbl;
+ ATOM_COMMON_TABLE_HEADER *header;
+ uint32_t table_size;
+ uint32_t i;
+ uint32_t number = 0;
+ uint32_t id_local = SS_ID_UNKNOWN;
+ struct atom_data_revision revision;
+
+ /* check that the SS_Info table exists */
+ if (!DATA_TABLES(SS_Info))
+ return number;
+
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ DATA_TABLES(SS_Info));
+ get_atom_data_table_revision(header, &revision);
+
+ tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO,
+ DATA_TABLES(SS_Info));
+
+ if (1 != revision.major || 2 > revision.minor)
+ return number;
+
+ /* have to convert from Internal_SS format to SS_Info format */
+ switch (id) {
+ case ASIC_INTERNAL_SS_ON_DP:
+ id_local = SS_ID_DP1;
+ break;
+ case ASIC_INTERNAL_SS_ON_LVDS: {
+ struct embedded_panel_info panel_info;
+
+ if (bios_parser_get_embedded_panel_info(&bp->base, &panel_info)
+ == BP_RESULT_OK)
+ id_local = panel_info.ss_id;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (id_local == SS_ID_UNKNOWN)
+ return number;
+
+ table_size = (le16_to_cpu(tbl->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);
+
+ for (i = 0; i < table_size; i++)
+ if (id_local == (uint32_t)tbl->asSS_Info[i].ucSS_Id) {
+ number = 1;
+ break;
+ }
+
+ return number;
+}
+
+/**
+ * get_ss_entry_number
+ * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
+ * Ver 2.1 or SS_Info table of the VBIOS.
+ * There cannot be more than one entry for ASIC_InternalSS_Info Ver 2.1 or
+ * SS_Info.
+ *
+ * @param id, spread spectrum info index
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number(struct bios_parser *bp, uint32_t id)
+{
+ if (id == ASIC_INTERNAL_SS_ON_DP || id == ASIC_INTERNAL_SS_ON_LVDS)
+ return get_ss_entry_number_from_ss_info_tbl(bp, id);
+
+ return get_ss_entry_number_from_internal_ss_info_tbl_v2_1(bp, id);
+}
+
+/**
+ * get_ss_entry_number_from_internal_ss_info_tbl_v2_1
+ * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
+ * table Ver 2.1 of the VBIOS.
+ * There will not be multiple entries for Ver 2.1.
+ *
+ * @param id, spread spectrum info index
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
+ struct bios_parser *bp,
+ uint32_t id)
+{
+ ATOM_ASIC_INTERNAL_SS_INFO_V2 *header_include;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 *tbl;
+ uint32_t size;
+ uint32_t i;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return 0;
+
+ header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2,
+ DATA_TABLES(ASIC_InternalSS_Info));
+
+ size = (le16_to_cpu(header_include->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V2 *)
+ &header_include->asSpreadSpectrum[0];
+ for (i = 0; i < size; i++)
+ if (tbl[i].ucClockIndication == (uint8_t)id)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * get_ss_entry_number_from_internal_ss_info_tbl_V3_1
+ * Get the number of spread spectrum entries from the ASIC_InternalSS_Info
+ * table of the VBIOS that match the id
+ *
+ * @param[in] id, spread spectrum id
+ * @return number of SS entries that match the id
+ */
+static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1(
+ struct bios_parser *bp,
+ uint32_t id)
+{
+ uint32_t number = 0;
+ ATOM_ASIC_INTERNAL_SS_INFO_V3 *header_include;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 *tbl;
+ uint32_t size;
+ uint32_t i;
+
+ if (!DATA_TABLES(ASIC_InternalSS_Info))
+ return number;
+
+ header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3,
+ DATA_TABLES(ASIC_InternalSS_Info));
+ size = (le16_to_cpu(header_include->sHeader.usStructureSize) -
+ sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
+
+ tbl = (ATOM_ASIC_SS_ASSIGNMENT_V3 *)
+ &header_include->asSpreadSpectrum[0];
+
+ for (i = 0; i < size; i++)
+ if (tbl[i].ucClockIndication == (uint8_t)id)
+ number++;
+
+ return number;
+}
+
+/**
+ * bios_parser_get_gpio_pin_info
+ * Get GpioPin information of input gpio id
+ *
+ * @param gpio_id, GPIO ID
+ * @param info, GpioPin information structure
+ * @return Bios parser result code
+ * @note
+ * to get the GPIO PIN INFO, we need:
+ * 1. get the GPIO_ID from other object table, see GetHPDInfo()
+ * 2. in DATA_TABLE.GPIO_Pin_LUT, search all records, to get the registerA
+ * offset/mask
+ */
+static enum bp_result bios_parser_get_gpio_pin_info(
+ struct dc_bios *dcb,
+ uint32_t gpio_id,
+ struct gpio_pin_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ ATOM_GPIO_PIN_LUT *header;
+ uint32_t count = 0;
+ uint32_t i = 0;
+
+ if (!DATA_TABLES(GPIO_Pin_LUT))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(ATOM_GPIO_PIN_LUT, DATA_TABLES(GPIO_Pin_LUT));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_PIN_LUT)
+ > le16_to_cpu(header->sHeader.usStructureSize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (1 != header->sHeader.ucTableContentRevision)
+ return BP_RESULT_UNSUPPORTED;
+
+ count = (le16_to_cpu(header->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+ for (i = 0; i < count; ++i) {
+ if (header->asGPIO_Pin[i].ucGPIO_ID != gpio_id)
+ continue;
+
+ info->offset =
+ (uint32_t) le16_to_cpu(header->asGPIO_Pin[i].usGpioPin_AIndex);
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask = (uint32_t) (1 <<
+ header->asGPIO_Pin[i].ucGpioPinBitShift);
+ info->mask_y = info->mask + 2;
+ info->mask_en = info->mask + 1;
+ info->mask_mask = info->mask - 1;
+
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
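+/*
+ * get_gpio_i2c_info
+ * Translate an ATOM_I2C_RECORD into a graphics_object_i2c_info by looking up
+ * the clock/data register indices and bit shifts for the record's I2C line
+ * in the GPIO_I2C_Info table.
+ */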
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+ ATOM_I2C_RECORD *record,
+ struct graphics_object_i2c_info *info)
+{
+ ATOM_GPIO_I2C_INFO *header;
+ uint32_t count = 0;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* get the GPIO_I2C info */
+ if (!DATA_TABLES(GPIO_I2C_Info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(ATOM_GPIO_I2C_INFO, DATA_TABLES(GPIO_I2C_Info));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(ATOM_COMMON_TABLE_HEADER) + sizeof(ATOM_GPIO_I2C_ASSIGMENT)
+ > le16_to_cpu(header->sHeader.usStructureSize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (1 != header->sHeader.ucTableContentRevision)
+ return BP_RESULT_UNSUPPORTED;
+
+ /* get data count */
+ count = (le16_to_cpu(header->sHeader.usStructureSize)
+ - sizeof(ATOM_COMMON_TABLE_HEADER))
+ / sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+ if (count < record->sucI2cId.bfI2C_LineMux)
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* get the GPIO_I2C_INFO */
+ info->i2c_hw_assist = record->sucI2cId.bfHW_Capable;
+ info->i2c_line = record->sucI2cId.bfI2C_LineMux;
+ info->i2c_engine_id = record->sucI2cId.bfHW_EngineID;
+ info->i2c_slave_address = record->ucI2CAddr;
+
+ info->gpio_info.clk_mask_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkMaskRegisterIndex);
+ info->gpio_info.clk_en_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkEnRegisterIndex);
+ info->gpio_info.clk_y_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkY_RegisterIndex);
+ info->gpio_info.clk_a_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usClkA_RegisterIndex);
+ info->gpio_info.data_mask_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataMaskRegisterIndex);
+ info->gpio_info.data_en_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataEnRegisterIndex);
+ info->gpio_info.data_y_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataY_RegisterIndex);
+ info->gpio_info.data_a_register_index =
+ le16_to_cpu(header->asGPIO_Info[info->i2c_line].usDataA_RegisterIndex);
+
+ info->gpio_info.clk_mask_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkMaskShift;
+ info->gpio_info.clk_en_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkEnShift;
+ info->gpio_info.clk_y_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkY_Shift;
+ info->gpio_info.clk_a_shift =
+ header->asGPIO_Info[info->i2c_line].ucClkA_Shift;
+ info->gpio_info.data_mask_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataMaskShift;
+ info->gpio_info.data_en_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataEnShift;
+ info->gpio_info.data_y_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataY_Shift;
+ info->gpio_info.data_a_shift =
+ header->asGPIO_Info[info->i2c_line].ucDataA_Shift;
+
+ return BP_RESULT_OK;
+}
+
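+/*
+ * get_bios_object
+ * Look up the ATOM_OBJECT matching a graphics_object_id by scanning the
+ * object table for the id's type (encoder, connector, router or generic).
+ */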
+static ATOM_OBJECT *get_bios_object(struct bios_parser *bp,
+ struct graphics_object_id id)
+{
+ uint32_t offset;
+ ATOM_OBJECT_TABLE *tbl;
+ uint32_t i;
+
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+ break;
+
+ case OBJECT_TYPE_CONNECTOR:
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+ break;
+
+ case OBJECT_TYPE_ROUTER:
+ offset = le16_to_cpu(bp->object_info_tbl.v1_1->usRouterObjectTableOffset);
+ break;
+
+ case OBJECT_TYPE_GENERIC:
+ if (bp->object_info_tbl.revision.minor < 3)
+ return NULL;
+ offset = le16_to_cpu(bp->object_info_tbl.v1_3->usMiscObjectTableOffset);
+ break;
+
+ default:
+ return NULL;
+ }
+
+ offset += bp->object_info_tbl_offset;
+
+ tbl = GET_IMAGE(ATOM_OBJECT_TABLE, offset);
+ if (!tbl)
+ return NULL;
+
+ for (i = 0; i < tbl->ucNumberOfObjects; i++)
+ if (dal_graphics_object_id_is_equal(id,
+ object_id_from_bios_object_id(
+ le16_to_cpu(tbl->asObjects[i].usObjectID))))
+ return &tbl->asObjects[i];
+
+ return NULL;
+}
+
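+/*
+ * get_dest_obj_list
+ * Read the object's src/dst table: a source count byte followed by that many
+ * 16-bit source IDs, then a destination count byte and the destination ID
+ * list. Return the destination count and a pointer to the destination IDs.
+ */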
+static uint32_t get_dest_obj_list(struct bios_parser *bp,
+ ATOM_OBJECT *object, uint16_t **id_list)
+{
+ uint32_t offset;
+ uint8_t *number;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return 0;
+ }
+
+ offset = le16_to_cpu(object->usSrcDstTableOffset)
+ + bp->object_info_tbl_offset;
+
+ number = GET_IMAGE(uint8_t, offset);
+ if (!number)
+ return 0;
+
+ offset += sizeof(uint8_t);
+ offset += sizeof(uint16_t) * (*number);
+
+ number = GET_IMAGE(uint8_t, offset);
+ if ((!number) || (!*number))
+ return 0;
+
+ offset += sizeof(uint8_t);
+ *id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+ if (!*id_list)
+ return 0;
+
+ return *number;
+}
+
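+/*
+ * get_src_obj_list
+ * Return the source count from the object's src/dst table and a pointer to
+ * its list of 16-bit source IDs.
+ */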
+static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
+ uint16_t **id_list)
+{
+ uint32_t offset;
+ uint8_t *number;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return 0;
+ }
+
+ offset = le16_to_cpu(object->usSrcDstTableOffset)
+ + bp->object_info_tbl_offset;
+
+ number = GET_IMAGE(uint8_t, offset);
+ if (!number)
+ return 0;
+
+ offset += sizeof(uint8_t);
+ *id_list = (uint16_t *)bios_get_image(&bp->base, offset, *number * sizeof(uint16_t));
+
+ if (!*id_list)
+ return 0;
+
+ return *number;
+}
+
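+/*
+ * get_dst_number_from_object
+ * Return only the destination count from the object's src/dst table.
+ */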
+static uint32_t get_dst_number_from_object(struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ uint8_t *number;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid encoder object id*/
+ return 0;
+ }
+
+ offset = le16_to_cpu(object->usSrcDstTableOffset)
+ + bp->object_info_tbl_offset;
+
+ number = GET_IMAGE(uint8_t, offset);
+ if (!number)
+ return 0;
+
+ offset += sizeof(uint8_t);
+ offset += sizeof(uint16_t) * (*number);
+
+ number = GET_IMAGE(uint8_t, offset);
+
+ if (!number)
+ return 0;
+
+ return *number;
+}
+
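+/*
+ * device_type_from_device_id
+ * Translate an ATOM_DEVICE_*_SUPPORT bit into a device type and enum id.
+ */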
+static struct device_id device_type_from_device_id(uint16_t device_id)
+{
+ struct device_id result_device_id;
+
+ switch (device_id) {
+ case ATOM_DEVICE_LCD1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_LCD;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DEVICE_LCD2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_LCD;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DEVICE_CRT1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_CRT;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DEVICE_CRT2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_CRT;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DEVICE_DFP1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DEVICE_DFP2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DEVICE_DFP3_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 3;
+ break;
+
+ case ATOM_DEVICE_DFP4_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 4;
+ break;
+
+ case ATOM_DEVICE_DFP5_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 5;
+ break;
+
+ case ATOM_DEVICE_DFP6_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 6;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER(); /* Invalid device Id */
+ result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
+ result_device_id.enum_id = 0;
+ }
+ return result_device_id;
+}
+
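+/*
+ * get_atom_data_table_revision
+ * Extract the major/minor revision from an ATOM data table header; the
+ * revision is left as 0.0 (invalid) when the header is NULL.
+ */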
+static void get_atom_data_table_revision(
+ ATOM_COMMON_TABLE_HEADER *atom_data_tbl,
+ struct atom_data_revision *tbl_revision)
+{
+ if (!tbl_revision)
+ return;
+
+ /* initialize the revision to 0, which is an invalid revision */
+ tbl_revision->major = 0;
+ tbl_revision->minor = 0;
+
+ if (!atom_data_tbl)
+ return;
+
+ tbl_revision->major =
+ (uint32_t) GET_DATA_TABLE_MAJOR_REVISION(atom_data_tbl);
+ tbl_revision->minor =
+ (uint32_t) GET_DATA_TABLE_MINOR_REVISION(atom_data_tbl);
+}
+
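+/*
+ * signal_to_ss_id
+ * Map an AS signal type to the internal spread spectrum clock id.
+ */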
+static uint32_t signal_to_ss_id(enum as_signal_type signal)
+{
+ uint32_t clk_id_ss = 0;
+
+ switch (signal) {
+ case AS_SIGNAL_TYPE_DVI:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_TMDS;
+ break;
+ case AS_SIGNAL_TYPE_HDMI:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_HDMI;
+ break;
+ case AS_SIGNAL_TYPE_LVDS:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_LVDS;
+ break;
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+ clk_id_ss = ASIC_INTERNAL_SS_ON_DP;
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+ clk_id_ss = ASIC_INTERNAL_GPUPLL_SS;
+ break;
+ default:
+ break;
+ }
+ return clk_id_ss;
+}
+
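+/*
+ * get_support_mask_for_device_id
+ * Map a device type/enum id pair back to its ATOM_DEVICE_*_SUPPORT mask.
+ */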
+static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+{
+ enum dal_device_type device_type = device_id.device_type;
+ uint32_t enum_id = device_id.enum_id;
+
+ switch (device_type) {
+ case DEVICE_TYPE_LCD:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_LCD1_SUPPORT;
+ case 2:
+ return ATOM_DEVICE_LCD2_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_CRT:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_CRT1_SUPPORT;
+ case 2:
+ return ATOM_DEVICE_CRT2_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_DFP:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_DFP1_SUPPORT;
+ case 2:
+ return ATOM_DEVICE_DFP2_SUPPORT;
+ case 3:
+ return ATOM_DEVICE_DFP3_SUPPORT;
+ case 4:
+ return ATOM_DEVICE_DFP4_SUPPORT;
+ case 5:
+ return ATOM_DEVICE_DFP5_SUPPORT;
+ case 6:
+ return ATOM_DEVICE_DFP6_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_CV:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_CV_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_TV:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DEVICE_TV1_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Unidentified device ID, return empty support mask. */
+ return 0;
+}
+
+/*
+ * i2c_read
+ * Read from the I2C slave described by the given GPIO I2C info, using the
+ * software I2C engine over a DDC line created for that GPIO.
+ */
+
+static bool i2c_read(
+ struct bios_parser *bp,
+ struct graphics_object_i2c_info *i2c_info,
+ uint8_t *buffer,
+ uint32_t length)
+{
+ struct ddc *ddc;
+ uint8_t offset[2] = { 0, 0 };
+ bool result = false;
+ struct i2c_command cmd;
+ struct gpio_ddc_hw_info hw_info = {
+ i2c_info->i2c_hw_assist,
+ i2c_info->i2c_line };
+
+ ddc = dal_gpio_create_ddc(bp->base.ctx->gpio_service,
+ i2c_info->gpio_info.clk_a_register_index,
+ (1 << i2c_info->gpio_info.clk_a_shift), &hw_info);
+
+ if (!ddc)
+ return result;
+
+ /* Using SW engine */
+ cmd.engine = I2C_COMMAND_ENGINE_SW;
+ cmd.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+ {
+ struct i2c_payload payloads[] = {
+ {
+ .address = i2c_info->i2c_slave_address >> 1,
+ .data = offset,
+ .length = sizeof(offset),
+ .write = true
+ },
+ {
+ .address = i2c_info->i2c_slave_address >> 1,
+ .data = buffer,
+ .length = length,
+ .write = false
+ }
+ };
+
+ cmd.payloads = payloads;
+ cmd.number_of_payloads = ARRAY_SIZE(payloads);
+
+ /* TODO route this through drm i2c_adapter */
+ result = dal_i2caux_submit_i2c_command(
+ ddc->ctx->i2caux,
+ ddc,
+ &cmd);
+ }
+
+ dal_gpio_destroy_ddc(&ddc);
+
+ return result;
+}
+
+/**
+ * Read the external display connection info table through I2C and
+ * validate the GUID and checksum.
+ *
+ * @return enum bp_result indicating whether all data was successfully read
+ */
+static enum bp_result get_ext_display_connection_info(
+ struct bios_parser *bp,
+ ATOM_OBJECT *opm_object,
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *ext_display_connection_info_tbl)
+{
+ bool config_tbl_present = false;
+ ATOM_I2C_RECORD *i2c_record = NULL;
+ uint32_t i = 0;
+
+ if (opm_object == NULL)
+ return BP_RESULT_BADINPUT;
+
+ i2c_record = get_i2c_record(bp, opm_object);
+
+ if (i2c_record != NULL) {
+ ATOM_GPIO_I2C_INFO *gpio_i2c_header;
+ struct graphics_object_i2c_info i2c_info;
+
+ gpio_i2c_header = GET_IMAGE(ATOM_GPIO_I2C_INFO,
+ bp->master_data_tbl->ListOfDataTables.GPIO_I2C_Info);
+
+ if (NULL == gpio_i2c_header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (get_gpio_i2c_info(bp, i2c_record, &i2c_info) !=
+ BP_RESULT_OK)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (i2c_read(bp,
+ &i2c_info,
+ (uint8_t *)ext_display_connection_info_tbl,
+ sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO))) {
+ config_tbl_present = true;
+ }
+ }
+
+ /* Validate GUID */
+ if (config_tbl_present)
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; i++) {
+ if (ext_display_connection_info_tbl->ucGuid[i]
+ != ext_display_connection_guid[i]) {
+ config_tbl_present = false;
+ break;
+ }
+ }
+
+ /* Validate checksum */
+ if (config_tbl_present) {
+ uint8_t check_sum = 0;
+ uint8_t *buf =
+ (uint8_t *)ext_display_connection_info_tbl;
+
+ for (i = 0; i < sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO);
+ i++) {
+ check_sum += buf[i];
+ }
+
+ if (check_sum != 0)
+ config_tbl_present = false;
+ }
+
+ if (config_tbl_present)
+ return BP_RESULT_OK;
+ else
+ return BP_RESULT_FAILURE;
+}
+
+/*
+ * Gets the first device ID in the same group as the given ID for enumerating.
+ * For instance, if any DFP device ID is passed, returns the device ID for DFP1.
+ *
+ * Returns the first device ID in the same group as the passed device ID,
+ * or 0 if no matching device group is found.
+ */
+static uint32_t enum_first_device_id(uint32_t dev_id)
+{
+ /* Return the first in the group that this ID belongs to. */
+ if (dev_id & ATOM_DEVICE_CRT_SUPPORT)
+ return ATOM_DEVICE_CRT1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_DFP_SUPPORT)
+ return ATOM_DEVICE_DFP1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_LCD_SUPPORT)
+ return ATOM_DEVICE_LCD1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_TV_SUPPORT)
+ return ATOM_DEVICE_TV1_SUPPORT;
+ else if (dev_id & ATOM_DEVICE_CV_SUPPORT)
+ return ATOM_DEVICE_CV_SUPPORT;
+
+ /* No group found for this device ID. */
+
+ dm_error("%s: incorrect input %d\n", __func__, dev_id);
+ /* No matching support flag for given device ID */
+ return 0;
+}
+
+/*
+ * Gets the next device ID in the group for a given device ID.
+ *
+ * dev_id - The current device ID being enumerated.
+ *
+ * Returns the next device ID in the group, or 0 if no further device exists.
+ */
+static uint32_t enum_next_dev_id(uint32_t dev_id)
+{
+ /* Get next device ID in the group. */
+ switch (dev_id) {
+ case ATOM_DEVICE_CRT1_SUPPORT:
+ return ATOM_DEVICE_CRT2_SUPPORT;
+ case ATOM_DEVICE_LCD1_SUPPORT:
+ return ATOM_DEVICE_LCD2_SUPPORT;
+ case ATOM_DEVICE_DFP1_SUPPORT:
+ return ATOM_DEVICE_DFP2_SUPPORT;
+ case ATOM_DEVICE_DFP2_SUPPORT:
+ return ATOM_DEVICE_DFP3_SUPPORT;
+ case ATOM_DEVICE_DFP3_SUPPORT:
+ return ATOM_DEVICE_DFP4_SUPPORT;
+ case ATOM_DEVICE_DFP4_SUPPORT:
+ return ATOM_DEVICE_DFP5_SUPPORT;
+ case ATOM_DEVICE_DFP5_SUPPORT:
+ return ATOM_DEVICE_DFP6_SUPPORT;
+ }
+
+ /* Done enumerating through devices. */
+ return 0;
+}
+
+/*
+ * Returns the new device tag record for a patched BIOS object.
+ *
+ * [IN] ext_display_path - External display path to copy the device tag from.
+ * [IN] device_support - Bit vector of device ID support flags.
+ * [OUT] device_tag - Device tag structure to fill with patched data.
+ *
+ * Returns true if a compatible device ID was found, false otherwise.
+ */
+static bool get_patched_device_tag(
+ struct bios_parser *bp,
+ EXT_DISPLAY_PATH *ext_display_path,
+ uint32_t device_support,
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag)
+{
+ uint32_t dev_id;
+ /* Use fallback behaviour if not supported. */
+ if (!bp->remap_device_tags) {
+ device_tag->ulACPIDeviceEnum =
+ cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+ device_tag->usDeviceID =
+ cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceTag));
+ return true;
+ }
+
+ /* Find the first unused in the same group. */
+ dev_id = enum_first_device_id(le16_to_cpu(ext_display_path->usDeviceTag));
+ while (dev_id != 0) {
+ /* Assign this device ID if supported. */
+ if ((device_support & dev_id) != 0) {
+ device_tag->ulACPIDeviceEnum =
+ cpu_to_le32((uint32_t) le16_to_cpu(ext_display_path->usDeviceACPIEnum));
+ device_tag->usDeviceID = cpu_to_le16((USHORT) dev_id);
+ return true;
+ }
+
+ dev_id = enum_next_dev_id(dev_id);
+ }
+
+ /* No compatible device ID found. */
+ return false;
+}
+
+/*
+ * Adds a device tag to a BIOS object's device tag record if a matching
+ * device ID is supported.
+ *
+ * object - Pointer to the BIOS object to add the device tag to.
+ * ext_display_path - Display path to retrieve the base device ID from.
+ * device_support - Pointer to the bit vector of supported device IDs.
+ */
+static void add_device_tag_from_ext_display_path(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object,
+ EXT_DISPLAY_PATH *ext_display_path,
+ uint32_t *device_support)
+{
+ /* Get device tag record for object. */
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag = NULL;
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD *device_tag_record = NULL;
+ enum bp_result result =
+ bios_parser_get_device_tag_record(
+ bp, object, &device_tag_record);
+
+ if ((le16_to_cpu(ext_display_path->usDeviceTag) != CONNECTOR_OBJECT_ID_NONE)
+ && (result == BP_RESULT_OK)) {
+ uint8_t index;
+
+ if ((device_tag_record->ucNumberOfDevice == 1) &&
+ (le16_to_cpu(device_tag_record->asDeviceTag[0].usDeviceID) == 0)) {
+ /* Work around a bug in current VBIOS releases where
+ * ucNumberOfDevice = 1 but there is no actual device
+ * tag data. This workaround is temporary until the
+ * updated VBIOS is distributed. */
+ device_tag_record->ucNumberOfDevice =
+ device_tag_record->ucNumberOfDevice - 1;
+ }
+
+ /* Attempt to find a matching device ID. */
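+ /* A new tag is appended at index ucNumberOfDevice; the count is incremented on success. */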
+ index = device_tag_record->ucNumberOfDevice;
+ device_tag = &device_tag_record->asDeviceTag[index];
+ if (get_patched_device_tag(
+ bp,
+ ext_display_path,
+ *device_support,
+ device_tag)) {
+ /* Update cached device support to remove assigned ID.
+ */
+ *device_support &= ~le16_to_cpu(device_tag->usDeviceID);
+ device_tag_record->ucNumberOfDevice++;
+ }
+ }
+}
+
+/*
+ * Read out a single EXT_DISPLAY_PATH from the external display connection
+ * info table. The specific entry is selected by the enum ID encoded in the
+ * given BIOS object ID.
+ *
+ * Returns the EXT_DISPLAY_PATH describing a single configuration table entry.
+ */
+
+#define INVALID_CONNECTOR 0xffff
+
+static EXT_DISPLAY_PATH *get_ext_display_path_entry(
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *config_table,
+ uint32_t bios_object_id)
+{
+ EXT_DISPLAY_PATH *ext_display_path;
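+ /* Enum IDs are 1-based, so convert to a 0-based index into sPath[]. */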
+ uint32_t ext_display_path_index =
+ ((bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT) - 1;
+
+ if (ext_display_path_index >= MAX_NUMBER_OF_EXT_DISPLAY_PATH)
+ return NULL;
+
+ ext_display_path = &config_table->sPath[ext_display_path_index];
+
+ if (le16_to_cpu(ext_display_path->usDeviceConnector) == INVALID_CONNECTOR)
+ ext_display_path->usDeviceConnector = cpu_to_le16(0);
+
+ return ext_display_path;
+}
+
+/*
+ * Get AUX/DDC information of the input object ID.
+ *
+ * Searches all records to find the ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+ * record.
+ */
+static ATOM_CONNECTOR_AUXDDC_LUT_RECORD *get_ext_connector_aux_ddc_lut_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ ATOM_COMMON_RECORD_HEADER *header;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER();
+ /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
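+ /* Walk the object's record list until the LAST_RECORD_TYPE terminator or a zero-size record. */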
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ 0 == header->ucRecordSize)
+ break;
+
+ if (ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE ==
+ header->ucRecordType &&
+ sizeof(ATOM_CONNECTOR_AUXDDC_LUT_RECORD) <=
+ header->ucRecordSize)
+ return (ATOM_CONNECTOR_AUXDDC_LUT_RECORD *)(header);
+
+ offset += header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+/*
+ * Get HPD pin information of the input object ID.
+ *
+ * Searches all records to find the ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+ * record.
+ */
+static ATOM_CONNECTOR_HPDPIN_LUT_RECORD *get_ext_connector_hpd_pin_lut_record(
+ struct bios_parser *bp,
+ ATOM_OBJECT *object)
+{
+ uint32_t offset;
+ ATOM_COMMON_RECORD_HEADER *header;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER();
+ /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->usRecordOffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(ATOM_COMMON_RECORD_HEADER, offset);
+
+ if (!header)
+ return NULL;
+
+ if (LAST_RECORD_TYPE == header->ucRecordType ||
+ 0 == header->ucRecordSize)
+ break;
+
+ if (ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE ==
+ header->ucRecordType &&
+ sizeof(ATOM_CONNECTOR_HPDPIN_LUT_RECORD) <=
+ header->ucRecordSize)
+ return (ATOM_CONNECTOR_HPDPIN_LUT_RECORD *)header;
+
+ offset += header->ucRecordSize;
+ }
+
+ return NULL;
+}
+
+/*
+ * Patch the VBIOS connector info table with data from an external
+ * display connection info table. This is necessary to support MXM
+ * boards with an OPM (output personality module). With these designs,
+ * the VBIOS connector info table specifies an MXM_CONNECTOR with a
+ * unique ID. The driver retrieves the external connection info table
+ * through i2c and then looks up the connector ID to find the real
+ * connector type (e.g. DFP1).
+ */
+static enum bp_result patch_bios_image_from_ext_display_connection_info(
+ struct bios_parser *bp)
+{
+ ATOM_OBJECT_TABLE *connector_tbl;
+ uint32_t connector_tbl_offset;
+ struct graphics_object_id object_id;
+ ATOM_OBJECT *object;
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO ext_display_connection_info_tbl;
+ EXT_DISPLAY_PATH *ext_display_path;
+ ATOM_CONNECTOR_AUXDDC_LUT_RECORD *aux_ddc_lut_record = NULL;
+ ATOM_I2C_RECORD *i2c_record = NULL;
+ ATOM_CONNECTOR_HPDPIN_LUT_RECORD *hpd_pin_lut_record = NULL;
+ ATOM_HPD_INT_RECORD *hpd_record = NULL;
+ ATOM_OBJECT_TABLE *encoder_table;
+ uint32_t encoder_table_offset;
+ ATOM_OBJECT *opm_object = NULL;
+ uint32_t i = 0;
+ struct graphics_object_id opm_object_id =
+ dal_graphics_object_id_init(
+ GENERIC_ID_MXM_OPM,
+ ENUM_ID_1,
+ OBJECT_TYPE_GENERIC);
+ ATOM_CONNECTOR_DEVICE_TAG_RECORD *dev_tag_record;
+ uint32_t cached_device_support =
+ le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport);
+
+ uint32_t dst_number;
+ uint16_t *dst_object_id_list;
+
+ opm_object = get_bios_object(bp, opm_object_id);
+ if (!opm_object)
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(&ext_display_connection_info_tbl, 0,
+ sizeof(ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO));
+
+ connector_tbl_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+ connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+ /* Read Connector info table from EEPROM through i2c */
+ if (get_ext_display_connection_info(bp,
+ opm_object,
+ &ext_display_connection_info_tbl) != BP_RESULT_OK) {
+
+ dm_logger_write(bp->base.ctx->logger, LOG_WARNING,
+ "%s: Failed to read Connection Info Table", __func__);
+ return BP_RESULT_UNSUPPORTED;
+ }
+
+ /* Get pointer to AUX/DDC and HPD LUTs */
+ aux_ddc_lut_record =
+ get_ext_connector_aux_ddc_lut_record(bp, opm_object);
+ hpd_pin_lut_record =
+ get_ext_connector_hpd_pin_lut_record(bp, opm_object);
+
+ if ((aux_ddc_lut_record == NULL) || (hpd_pin_lut_record == NULL))
+ return BP_RESULT_UNSUPPORTED;
+
+ /* Cache support bits for currently unmapped device types. */
+ if (bp->remap_device_tags) {
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; ++i) {
+ uint32_t j;
+ /* Remove support for all non-MXM connectors. */
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(
+ le16_to_cpu(object->usObjectID));
+ if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+ (CONNECTOR_ID_MXM == object_id.id))
+ continue;
+
+ /* Remove support for all device tags. */
+ if (bios_parser_get_device_tag_record(
+ bp, object, &dev_tag_record) != BP_RESULT_OK)
+ continue;
+
+ for (j = 0; j < dev_tag_record->ucNumberOfDevice; ++j) {
+ ATOM_CONNECTOR_DEVICE_TAG *device_tag =
+ &dev_tag_record->asDeviceTag[j];
+ cached_device_support &=
+ ~le16_to_cpu(device_tag->usDeviceID);
+ }
+ }
+ }
+
+ /* Find all MXM connector objects and patch them with connector info
+ * from the external display connection info table. */
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+ uint32_t j;
+
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+ if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+ (CONNECTOR_ID_MXM != object_id.id))
+ continue;
+
+ /* Get the correct connection info table entry based on the enum
+ * id. */
+ ext_display_path = get_ext_display_path_entry(
+ &ext_display_connection_info_tbl,
+ le16_to_cpu(object->usObjectID));
+ if (!ext_display_path)
+ return BP_RESULT_FAILURE;
+
+ /* Patch device connector ID */
+ object->usObjectID =
+ cpu_to_le16(le16_to_cpu(ext_display_path->usDeviceConnector));
+
+ /* Patch device tag, ulACPIDeviceEnum. */
+ add_device_tag_from_ext_display_path(
+ bp,
+ object,
+ ext_display_path,
+ &cached_device_support);
+
+ /* Patch HPD info */
+ if (ext_display_path->ucExtHPDPINLutIndex <
+ MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES) {
+ hpd_record = get_hpd_record(bp, object);
+ if (hpd_record) {
+ uint8_t index =
+ ext_display_path->ucExtHPDPINLutIndex;
+ hpd_record->ucHPDIntGPIOID =
+ hpd_pin_lut_record->ucHPDPINMap[index];
+ } else {
+ BREAK_TO_DEBUGGER();
+ /* Invalid hpd record */
+ return BP_RESULT_FAILURE;
+ }
+ }
+
+ /* Patch I2C/AUX info */
+ if (ext_display_path->ucExtAUXDDCLutIndex <
+ MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES) {
+ i2c_record = get_i2c_record(bp, object);
+ if (i2c_record) {
+ uint8_t index =
+ ext_display_path->ucExtAUXDDCLutIndex;
+ i2c_record->sucI2cId =
+ aux_ddc_lut_record->ucAUXDDCMap[index];
+ } else {
+ BREAK_TO_DEBUGGER();
+ /* Invalid I2C record */
+ return BP_RESULT_FAILURE;
+ }
+ }
+
+ /* Merge with other MXM connectors that map to the same physical
+ * connector. */
+ for (j = i + 1;
+ j < connector_tbl->ucNumberOfObjects; j++) {
+ ATOM_OBJECT *next_object;
+ struct graphics_object_id next_object_id;
+ EXT_DISPLAY_PATH *next_ext_display_path;
+
+ next_object = &connector_tbl->asObjects[j];
+ next_object_id = object_id_from_bios_object_id(
+ le16_to_cpu(next_object->usObjectID));
+
+ if ((OBJECT_TYPE_CONNECTOR != next_object_id.type) ||
+ (CONNECTOR_ID_MXM != next_object_id.id))
+ continue;
+
+ next_ext_display_path = get_ext_display_path_entry(
+ &ext_display_connection_info_tbl,
+ le16_to_cpu(next_object->usObjectID));
+
+ if (next_ext_display_path == NULL)
+ return BP_RESULT_FAILURE;
+
+ /* Merge if using same connector. */
+ if ((le16_to_cpu(next_ext_display_path->usDeviceConnector) ==
+ le16_to_cpu(ext_display_path->usDeviceConnector)) &&
+ (le16_to_cpu(ext_display_path->usDeviceConnector) != 0)) {
+ /* Clear duplicate connector from table. */
+ next_object->usObjectID = cpu_to_le16(0);
+ add_device_tag_from_ext_display_path(
+ bp,
+ object,
+ ext_display_path,
+ &cached_device_support);
+ }
+ }
+ }
+
+ /* Find all encoders which have an MXM object as their destination.
+ * Replace the MXM object with the real connector Id from the external
+ * display connection info table */
+
+ encoder_table_offset = bp->object_info_tbl_offset
+ + le16_to_cpu(bp->object_info_tbl.v1_1->usEncoderObjectTableOffset);
+ encoder_table = GET_IMAGE(ATOM_OBJECT_TABLE, encoder_table_offset);
+
+ for (i = 0; i < encoder_table->ucNumberOfObjects; i++) {
+ uint32_t j;
+
+ object = &encoder_table->asObjects[i];
+
+ dst_number = get_dest_obj_list(bp, object, &dst_object_id_list);
+
+ for (j = 0; j < dst_number; j++) {
+ object_id = object_id_from_bios_object_id(
+ dst_object_id_list[j]);
+
+ if ((OBJECT_TYPE_CONNECTOR != object_id.type) ||
+ (CONNECTOR_ID_MXM != object_id.id))
+ continue;
+
+ /* Get the correct connection info table entry based on
+ * the enum id. */
+ ext_display_path =
+ get_ext_display_path_entry(
+ &ext_display_connection_info_tbl,
+ dst_object_id_list[j]);
+
+ if (ext_display_path == NULL)
+ return BP_RESULT_FAILURE;
+
+ dst_object_id_list[j] =
+ le16_to_cpu(ext_display_path->usDeviceConnector);
+ }
+ }
+
+ return BP_RESULT_OK;
+}
+
+/*
+ * Check whether we need to patch the VBIOS connector info table with
+ * data from an external display connection info table. This is
+ * necessary to support MXM boards with an OPM (output personality
+ * module). With these designs, the VBIOS connector info table
+ * specifies an MXM_CONNECTOR with a unique ID. The driver retrieves
+ * the external connection info table through i2c and then looks up the
+ * connector ID to find the real connector type (e.g. DFP1).
+ */
+
+static void process_ext_display_connection_info(struct bios_parser *bp)
+{
+ ATOM_OBJECT_TABLE *connector_tbl;
+ uint32_t connector_tbl_offset;
+ struct graphics_object_id object_id;
+ ATOM_OBJECT *object;
+ bool mxm_connector_found = false;
+ bool null_entry_found = false;
+ uint32_t i = 0;
+
+ connector_tbl_offset = bp->object_info_tbl_offset +
+ le16_to_cpu(bp->object_info_tbl.v1_1->usConnectorObjectTableOffset);
+ connector_tbl = GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+ /* Look for MXM connectors to determine whether we need to patch the
+ * VBIOS connector info table. Look for null entries to determine
+ * whether we need to compact the connector table. */
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(le16_to_cpu(object->usObjectID));
+
+ if ((OBJECT_TYPE_CONNECTOR == object_id.type) &&
+ (CONNECTOR_ID_MXM == object_id.id)) {
+ /* Once we found MXM connector - we can break */
+ mxm_connector_found = true;
+ break;
+ } else if (OBJECT_TYPE_CONNECTOR != object_id.type) {
+ /* We need to continue looping to check whether an
+ * MXM connector is present */
+ null_entry_found = true;
+ }
+ }
+
+ /* Patch BIOS image */
+ if (mxm_connector_found || null_entry_found) {
+ uint32_t connectors_num = 0;
+ uint8_t *original_bios;
+ /* Step 1: Replace bios image with the new copy which will be
+ * patched */
+ bp->base.bios_local_image = kzalloc(bp->base.bios_size,
+ GFP_KERNEL);
+ if (bp->base.bios_local_image == NULL) {
+ BREAK_TO_DEBUGGER();
+ /* Failed to alloc bp->base.bios_local_image */
+ return;
+ }
+
+ memmove(bp->base.bios_local_image, bp->base.bios, bp->base.bios_size);
+ original_bios = bp->base.bios;
+ bp->base.bios = bp->base.bios_local_image;
+ connector_tbl =
+ GET_IMAGE(ATOM_OBJECT_TABLE, connector_tbl_offset);
+
+ /* Step 2: (only if MXM connector found) Patch BIOS image with
+ * info from external module */
+ if (mxm_connector_found &&
+ patch_bios_image_from_ext_display_connection_info(bp) !=
+ BP_RESULT_OK) {
+ /* Patching the bios image has failed. We will copy
+ * again original image provided and afterwards
+ * only remove null entries */
+ memmove(
+ bp->base.bios_local_image,
+ original_bios,
+ bp->base.bios_size);
+ }
+
+ /* Step 3: Compact connector table (remove null entries, valid
+ * entries moved to beginning) */
+ for (i = 0; i < connector_tbl->ucNumberOfObjects; i++) {
+ object = &connector_tbl->asObjects[i];
+ object_id = object_id_from_bios_object_id(
+ le16_to_cpu(object->usObjectID));
+
+ if (OBJECT_TYPE_CONNECTOR != object_id.type)
+ continue;
+
+ if (i != connectors_num) {
+ memmove(
+ &connector_tbl->
+ asObjects[connectors_num],
+ object,
+ sizeof(ATOM_OBJECT));
+ }
+ ++connectors_num;
+ }
+ connector_tbl->ucNumberOfObjects = (uint8_t)connectors_num;
+ }
+}
+
+static void bios_parser_post_init(struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ process_ext_display_connection_info(bp);
+}
+
+/**
+ * bios_parser_set_scratch_critical_state
+ *
+ * @brief
+ * Update the critical state bit in the VBIOS scratch register
+ *
+ * @param
+ * state - true to set the critical state bit, false to reset it
+ */
+static void bios_parser_set_scratch_critical_state(
+ struct dc_bios *dcb,
+ bool state)
+{
+ bios_set_scratch_critical_state(dcb, state);
+}
+
+/*
+ * get_integrated_info_v8
+ *
+ * @brief
+ * Get V8 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v8(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ ATOM_INTEGRATED_SYSTEM_INFO_V1_8 *info_v8;
+ uint32_t i;
+
+ info_v8 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_8,
+ bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+ if (info_v8 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+ info->boot_up_engine_clock = le32_to_cpu(info_v8->ulBootUpEngineClock) * 10;
+ info->dentist_vco_freq = le32_to_cpu(info_v8->ulDentistVCOFreq) * 10;
+ info->boot_up_uma_clock = le32_to_cpu(info_v8->ulBootUpUMAClock) * 10;
+
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->disp_clk_voltage[i].max_supported_clk =
+ le32_to_cpu(info_v8->sDISPCLK_Voltage[i].
+ ulMaximumSupportedCLK) * 10;
+ info->disp_clk_voltage[i].voltage_index =
+ le32_to_cpu(info_v8->sDISPCLK_Voltage[i].ulVoltageIndex);
+ }
+
+ info->boot_up_req_display_vector =
+ le32_to_cpu(info_v8->ulBootUpReqDisplayVector);
+ info->gpu_cap_info =
+ le32_to_cpu(info_v8->ulGPUCapInfo);
+
+ /*
+ * system_config: Bit[0] = 0 : PCIE power gating disabled
+ * = 1 : PCIE power gating enabled
+ * Bit[1] = 0 : DDR-PLL shut down disabled
+ * = 1 : DDR-PLL shut down enabled
+ * Bit[2] = 0 : DDR-PLL power down disabled
+ * = 1 : DDR-PLL power down enabled
+ */
+ info->system_config = le32_to_cpu(info_v8->ulSystemConfig);
+ info->cpu_cap_info = le32_to_cpu(info_v8->ulCPUCapInfo);
+ info->boot_up_nb_voltage =
+ le16_to_cpu(info_v8->usBootUpNBVoltage);
+ info->ext_disp_conn_info_offset =
+ le16_to_cpu(info_v8->usExtDispConnInfoOffset);
+ info->memory_type = info_v8->ucMemoryType;
+ info->ma_channel_number = info_v8->ucUMAChannelNumber;
+ info->gmc_restore_reset_time =
+ le32_to_cpu(info_v8->ulGMCRestoreResetTime);
+
+ info->minimum_n_clk =
+ le32_to_cpu(info_v8->ulNbpStateNClkFreq[0]);
+ for (i = 1; i < 4; ++i)
+ info->minimum_n_clk =
+ info->minimum_n_clk < le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]) ?
+ info->minimum_n_clk : le32_to_cpu(info_v8->ulNbpStateNClkFreq[i]);
+
+ info->idle_n_clk = le32_to_cpu(info_v8->ulIdleNClk);
+ info->ddr_dll_power_up_time =
+ le32_to_cpu(info_v8->ulDDR_DLL_PowerUpTime);
+ info->ddr_pll_power_up_time =
+ le32_to_cpu(info_v8->ulDDR_PLL_PowerUpTime);
+ info->pcie_clk_ss_type = le16_to_cpu(info_v8->usPCIEClkSSType);
+ info->lvds_ss_percentage =
+ le16_to_cpu(info_v8->usLvdsSSPercentage);
+ info->lvds_sspread_rate_in_10hz =
+ le16_to_cpu(info_v8->usLvdsSSpreadRateIn10Hz);
+ info->hdmi_ss_percentage =
+ le16_to_cpu(info_v8->usHDMISSPercentage);
+ info->hdmi_sspread_rate_in_10hz =
+ le16_to_cpu(info_v8->usHDMISSpreadRateIn10Hz);
+ info->dvi_ss_percentage =
+ le16_to_cpu(info_v8->usDVISSPercentage);
+ info->dvi_sspread_rate_in_10_hz =
+ le16_to_cpu(info_v8->usDVISSpreadRateIn10Hz);
+
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v8->usMaxLVDSPclkFreqInSingleLink);
+ info->lvds_misc = info_v8->ucLvdsMisc;
+ info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+ info_v8->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+ info_v8->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+ info_v8->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+ info_v8->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+ info_v8->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+ info_v8->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ info->lvds_off_to_on_delay_in_4ms =
+ info_v8->ucLVDSOffToOnDelay_in4Ms;
+ info->lvds_bit_depth_control_val =
+ le32_to_cpu(info_v8->ulLCDBitDepthControlVal);
+
+ for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->avail_s_clk[i].supported_s_clk =
+ le32_to_cpu(info_v8->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+ info->avail_s_clk[i].voltage_index =
+ le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageIndex);
+ info->avail_s_clk[i].voltage_id =
+ le16_to_cpu(info_v8->sAvail_SCLK[i].usVoltageID);
+ }
+
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+ info_v8->sExtDispConnInfo.ucGuid[i];
+ }
+
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+ info->ext_disp_conn_info.path[i].device_connector_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+ info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+ info->ext_disp_conn_info.path[i].device_tag =
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceTag);
+ info->ext_disp_conn_info.path[i].device_acpi_enum =
+ le16_to_cpu(info_v8->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+ info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+ info_v8->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+ info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+ info_v8->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+ info->ext_disp_conn_info.path[i].channel_mapping.raw =
+ info_v8->sExtDispConnInfo.sPath[i].ucChannelMapping;
+ }
+ info->ext_disp_conn_info.checksum =
+ info_v8->sExtDispConnInfo.ucChecksum;
+
+ return BP_RESULT_OK;
+}
+
+/*
+ * get_integrated_info_v9
+ *
+ * @brief
+ * Get V9 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v9(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info_v9;
+ uint32_t i;
+
+ info_v9 = GET_IMAGE(ATOM_INTEGRATED_SYSTEM_INFO_V1_9,
+ bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+ if (!info_v9)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->boot_up_engine_clock = le32_to_cpu(info_v9->ulBootUpEngineClock) * 10;
+ info->dentist_vco_freq = le32_to_cpu(info_v9->ulDentistVCOFreq) * 10;
+ info->boot_up_uma_clock = le32_to_cpu(info_v9->ulBootUpUMAClock) * 10;
+
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->disp_clk_voltage[i].max_supported_clk =
+ le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulMaximumSupportedCLK) * 10;
+ info->disp_clk_voltage[i].voltage_index =
+ le32_to_cpu(info_v9->sDISPCLK_Voltage[i].ulVoltageIndex);
+ }
+
+ info->boot_up_req_display_vector =
+ le32_to_cpu(info_v9->ulBootUpReqDisplayVector);
+ info->gpu_cap_info = le32_to_cpu(info_v9->ulGPUCapInfo);
+
+ /*
+ * system_config: Bit[0] = 0 : PCIE power gating disabled
+ * = 1 : PCIE power gating enabled
+ * Bit[1] = 0 : DDR-PLL shut down disabled
+ * = 1 : DDR-PLL shut down enabled
+ * Bit[2] = 0 : DDR-PLL power down disabled
+ * = 1 : DDR-PLL power down enabled
+ */
+ info->system_config = le32_to_cpu(info_v9->ulSystemConfig);
+ info->cpu_cap_info = le32_to_cpu(info_v9->ulCPUCapInfo);
+ info->boot_up_nb_voltage = le16_to_cpu(info_v9->usBootUpNBVoltage);
+ info->ext_disp_conn_info_offset = le16_to_cpu(info_v9->usExtDispConnInfoOffset);
+ info->memory_type = info_v9->ucMemoryType;
+ info->ma_channel_number = info_v9->ucUMAChannelNumber;
+ info->gmc_restore_reset_time = le32_to_cpu(info_v9->ulGMCRestoreResetTime);
+
+ info->minimum_n_clk = le32_to_cpu(info_v9->ulNbpStateNClkFreq[0]);
+ for (i = 1; i < 4; ++i)
+ info->minimum_n_clk =
+ info->minimum_n_clk < le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]) ?
+ info->minimum_n_clk : le32_to_cpu(info_v9->ulNbpStateNClkFreq[i]);
+
+ info->idle_n_clk = le32_to_cpu(info_v9->ulIdleNClk);
+ info->ddr_dll_power_up_time = le32_to_cpu(info_v9->ulDDR_DLL_PowerUpTime);
+ info->ddr_pll_power_up_time = le32_to_cpu(info_v9->ulDDR_PLL_PowerUpTime);
+ info->pcie_clk_ss_type = le16_to_cpu(info_v9->usPCIEClkSSType);
+ info->lvds_ss_percentage = le16_to_cpu(info_v9->usLvdsSSPercentage);
+ info->lvds_sspread_rate_in_10hz = le16_to_cpu(info_v9->usLvdsSSpreadRateIn10Hz);
+ info->hdmi_ss_percentage = le16_to_cpu(info_v9->usHDMISSPercentage);
+ info->hdmi_sspread_rate_in_10hz = le16_to_cpu(info_v9->usHDMISSpreadRateIn10Hz);
+ info->dvi_ss_percentage = le16_to_cpu(info_v9->usDVISSPercentage);
+ info->dvi_sspread_rate_in_10_hz = le16_to_cpu(info_v9->usDVISSpreadRateIn10Hz);
+
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v9->usMaxLVDSPclkFreqInSingleLink);
+ info->lvds_misc = info_v9->ucLvdsMisc;
+ info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+ info_v9->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+ info_v9->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+ info_v9->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+ info_v9->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+ info_v9->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+ info_v9->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ info->lvds_off_to_on_delay_in_4ms =
+ info_v9->ucLVDSOffToOnDelay_in4Ms;
+ info->lvds_bit_depth_control_val =
+ le32_to_cpu(info_v9->ulLCDBitDepthControlVal);
+
+ for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->avail_s_clk[i].supported_s_clk =
+ le32_to_cpu(info_v9->sAvail_SCLK[i].ulSupportedSCLK) * 10;
+ info->avail_s_clk[i].voltage_index =
+ le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageIndex);
+ info->avail_s_clk[i].voltage_id =
+ le16_to_cpu(info_v9->sAvail_SCLK[i].usVoltageID);
+ }
+
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+ info_v9->sExtDispConnInfo.ucGuid[i];
+ }
+
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+ info->ext_disp_conn_info.path[i].device_connector_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceConnector));
+
+ info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usExtEncoderObjId));
+
+ info->ext_disp_conn_info.path[i].device_tag =
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceTag);
+ info->ext_disp_conn_info.path[i].device_acpi_enum =
+ le16_to_cpu(info_v9->sExtDispConnInfo.sPath[i].usDeviceACPIEnum);
+ info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+ info_v9->sExtDispConnInfo.sPath[i].ucExtAUXDDCLutIndex;
+ info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+ info_v9->sExtDispConnInfo.sPath[i].ucExtHPDPINLutIndex;
+ info->ext_disp_conn_info.path[i].channel_mapping.raw =
+ info_v9->sExtDispConnInfo.sPath[i].ucChannelMapping;
+ }
+ info->ext_disp_conn_info.checksum =
+ info_v9->sExtDispConnInfo.ucChecksum;
+
+ return BP_RESULT_OK;
+}
+
+/*
+ * construct_integrated_info
+ *
+ * @brief
+ * Get integrated BIOS information based on table revision
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handler to get master data table
+ * integrated_info *info - [out] store and output integrated info
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result construct_integrated_info(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+
+ ATOM_COMMON_TABLE_HEADER *header;
+ struct atom_data_revision revision;
+
+ if (bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo) {
+ header = GET_IMAGE(ATOM_COMMON_TABLE_HEADER,
+ bp->master_data_tbl->ListOfDataTables.IntegratedSystemInfo);
+
+ get_atom_data_table_revision(header, &revision);
+
+ /* Don't need to check major revision as they are all 1 */
+ switch (revision.minor) {
+ case 8:
+ result = get_integrated_info_v8(bp, info);
+ break;
+ case 9:
+ result = get_integrated_info_v9(bp, info);
+ break;
+ default:
+ return result;
+
+ }
+ }
+
+ /* Sort voltage table from low to high */
+ if (result == BP_RESULT_OK) {
+ struct clock_voltage_caps temp = {0, 0};
+ uint32_t i;
+ uint32_t j;
+
+ for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ for (j = i; j > 0; --j) {
+ if (
+ info->disp_clk_voltage[j].max_supported_clk <
+ info->disp_clk_voltage[j-1].max_supported_clk) {
+ /* swap j and j - 1 */
+ temp = info->disp_clk_voltage[j-1];
+ info->disp_clk_voltage[j-1] =
+ info->disp_clk_voltage[j];
+ info->disp_clk_voltage[j] = temp;
+ }
+ }
+ }
+
+ }
+
+ return result;
+}
+
+static struct integrated_info *bios_parser_create_integrated_info(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct integrated_info *info = NULL;
+
+ info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
+
+ if (info == NULL) {
+ ASSERT_CRITICAL(0);
+ return NULL;
+ }
+
+ if (construct_integrated_info(bp, info) == BP_RESULT_OK)
+ return info;
+
+ kfree(info);
+
+ return NULL;
+}
+
+/******************************************************************************/
+
+static const struct dc_vbios_funcs vbios_funcs = {
+ .get_connectors_number = bios_parser_get_connectors_number,
+
+ .get_encoder_id = bios_parser_get_encoder_id,
+
+ .get_connector_id = bios_parser_get_connector_id,
+
+ .get_dst_number = bios_parser_get_dst_number,
+
+ .get_src_obj = bios_parser_get_src_obj,
+
+ .get_dst_obj = bios_parser_get_dst_obj,
+
+ .get_i2c_info = bios_parser_get_i2c_info,
+
+ .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+
+ .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+
+ .get_hpd_info = bios_parser_get_hpd_info,
+
+ .get_device_tag = bios_parser_get_device_tag,
+
+ .get_firmware_info = bios_parser_get_firmware_info,
+
+ .get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
+
+ .get_ss_entry_number = bios_parser_get_ss_entry_number,
+
+ .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+ .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+ .get_encoder_cap_info = bios_parser_get_encoder_cap_info,
+
+ /* bios scratch register communication */
+ .is_accelerated_mode = bios_is_accelerated_mode,
+
+ .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+ .is_device_id_supported = bios_parser_is_device_id_supported,
+
+ /* COMMANDS */
+ .encoder_control = bios_parser_encoder_control,
+
+ .transmitter_control = bios_parser_transmitter_control,
+
+ .crt_control = bios_parser_crt_control, /* not used in DAL3. keep for now in case we need to support VGA on Bonaire */
+
+ .enable_crtc = bios_parser_enable_crtc,
+
+ .adjust_pixel_clock = bios_parser_adjust_pixel_clock,
+
+ .set_pixel_clock = bios_parser_set_pixel_clock,
+
+ .set_dce_clock = bios_parser_set_dce_clock,
+
+ .enable_spread_spectrum_on_ppll = bios_parser_enable_spread_spectrum_on_ppll,
+
+ .program_crtc_timing = bios_parser_program_crtc_timing, /* still use. should probably retire and program directly */
+
+ .crtc_source_select = bios_parser_crtc_source_select, /* still use. should probably retire and program directly */
+
+ .program_display_engine_pll = bios_parser_program_display_engine_pll,
+
+ .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+ /* SW init and patch */
+ .post_init = bios_parser_post_init, /* patch vbios table for mxm module by reading i2c */
+
+ .bios_parser_destroy = bios_parser_destroy,
+};
+
+static bool bios_parser_construct(
+ struct bios_parser *bp,
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ uint16_t *rom_header_offset = NULL;
+ ATOM_ROM_HEADER *rom_header = NULL;
+ ATOM_OBJECT_HEADER *object_info_tbl;
+ struct atom_data_revision tbl_rev = {0};
+
+ if (!init)
+ return false;
+
+ if (!init->bios)
+ return false;
+
+ bp->base.funcs = &vbios_funcs;
+ bp->base.bios = init->bios;
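+ /* The ROM image stores its own size (in BIOS_IMAGE_SIZE_UNIT blocks) at BIOS_IMAGE_SIZE_OFFSET. */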
+ bp->base.bios_size = bp->base.bios[BIOS_IMAGE_SIZE_OFFSET] * BIOS_IMAGE_SIZE_UNIT;
+
+ bp->base.ctx = init->ctx;
+ bp->base.bios_local_image = NULL;
+
+ rom_header_offset =
+ GET_IMAGE(uint16_t, OFFSET_TO_POINTER_TO_ATOM_ROM_HEADER);
+
+ if (!rom_header_offset)
+ return false;
+
+ rom_header = GET_IMAGE(ATOM_ROM_HEADER, *rom_header_offset);
+
+ if (!rom_header)
+ return false;
+
+ get_atom_data_table_revision(&rom_header->sHeader, &tbl_rev);
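+ /* Header revisions 2.2 and later are not handled by this legacy parser
+ * (the atomfirmware layout is parsed by bios_parser2). */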
+ if (tbl_rev.major >= 2 && tbl_rev.minor >= 2)
+ return false;
+
+ bp->master_data_tbl =
+ GET_IMAGE(ATOM_MASTER_DATA_TABLE,
+ rom_header->usMasterDataTableOffset);
+
+ if (!bp->master_data_tbl)
+ return false;
+
+ bp->object_info_tbl_offset = DATA_TABLES(Object_Header);
+
+ if (!bp->object_info_tbl_offset)
+ return false;
+
+ object_info_tbl =
+ GET_IMAGE(ATOM_OBJECT_HEADER, bp->object_info_tbl_offset);
+
+ if (!object_info_tbl)
+ return false;
+
+ get_atom_data_table_revision(&object_info_tbl->sHeader,
+ &bp->object_info_tbl.revision);
+
+ if (bp->object_info_tbl.revision.major == 1
+ && bp->object_info_tbl.revision.minor >= 3) {
+ ATOM_OBJECT_HEADER_V3 *tbl_v3;
+
+ tbl_v3 = GET_IMAGE(ATOM_OBJECT_HEADER_V3,
+ bp->object_info_tbl_offset);
+ if (!tbl_v3)
+ return false;
+
+ bp->object_info_tbl.v1_3 = tbl_v3;
+ } else if (bp->object_info_tbl.revision.major == 1
+ && bp->object_info_tbl.revision.minor >= 1)
+ bp->object_info_tbl.v1_1 = object_info_tbl;
+ else
+ return false;
+
+ dal_bios_parser_init_cmd_tbl(bp);
+ dal_bios_parser_init_cmd_tbl_helper(&bp->cmd_helper, dce_version);
+
+ bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+
+ return true;
+}
+
+/******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
new file mode 100644
index 000000000000..d6f16275048f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_H__
+#define __DAL_BIOS_PARSER_H__
+
+struct dc_bios *bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
new file mode 100644
index 000000000000..1ee1717f2e6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -0,0 +1,1934 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "dc_bios_types.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/bios_parser_interface.h"
+#include "include/i2caux_interface.h"
+#include "include/logger_interface.h"
+
+#include "command_table2.h"
+
+#include "bios_parser_helper.h"
+#include "command_table_helper2.h"
+#include "bios_parser2.h"
+#include "bios_parser_types_internal2.h"
+#include "bios_parser_interface.h"
+
+#include "bios_parser_common.h"
+#define LAST_RECORD_TYPE 0xff
+
+
+struct i2c_id_config_access {
+ uint8_t bfI2C_LineMux:4;
+ uint8_t bfHW_EngineID:3;
+ uint8_t bfHW_Capable:1;
+ uint8_t ucAccess;
+};
+
+static enum bp_result get_gpio_i2c_info(struct bios_parser *bp,
+ struct atom_i2c_record *record,
+ struct graphics_object_i2c_info *info);
+
+static enum bp_result bios_parser_get_firmware_info(
+ struct dc_bios *dcb,
+ struct dc_firmware_info *info);
+
+static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info);
+
+static enum bp_result get_firmware_info_v3_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
+static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object);
+
+static struct atom_encoder_caps_record *get_encoder_cap_record(
+ struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object);
+
+#define BIOS_IMAGE_SIZE_OFFSET 2
+#define BIOS_IMAGE_SIZE_UNIT 512
+
+#define DATA_TABLES(table) (bp->master_data_tbl->listOfdatatables.table)
+
+
+static void destruct(struct bios_parser *bp)
+{
+ kfree(bp->base.bios_local_image);
+ kfree(bp->base.integrated_info);
+}
+
+static void firmware_parser_destroy(struct dc_bios **dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(*dcb);
+
+ if (!bp) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ destruct(bp);
+
+ kfree(bp);
+ *dcb = NULL;
+}
+
+static void get_atom_data_table_revision(
+ struct atom_common_table_header *atom_data_tbl,
+ struct atom_data_revision *tbl_revision)
+{
+ if (!tbl_revision)
+ return;
+
+ /* initialize the revision to 0 which is invalid revision */
+ tbl_revision->major = 0;
+ tbl_revision->minor = 0;
+
+ if (!atom_data_tbl)
+ return;
+
+ tbl_revision->major =
+ (uint32_t) atom_data_tbl->format_revision & 0x3f;
+ tbl_revision->minor =
+ (uint32_t) atom_data_tbl->content_revision & 0x3f;
+}
+
+/* The BIOS object table display path is per connector.
+ * There are extra paths that are not for connectors; the BIOS fills their
+ * encoder ID with 0.
+ */
+static uint8_t bios_parser_get_connectors_number(struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ unsigned int count = 0;
+ unsigned int i;
+
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ if (bp->object_info_tbl.v1_4->display_path[i].encoderobjid != 0)
+ count++;
+ }
+ return count;
+}
+
+static struct graphics_object_id bios_parser_get_encoder_id(
+ struct dc_bios *dcb,
+ uint32_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+
+ if (bp->object_info_tbl.v1_4->number_of_path > i)
+ object_id = object_id_from_bios_object_id(
+ bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+
+ return object_id;
+}
+
+static struct graphics_object_id bios_parser_get_connector_id(
+ struct dc_bios *dcb,
+ uint8_t i)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct graphics_object_id object_id = dal_graphics_object_id_init(
+ 0, ENUM_ID_UNKNOWN, OBJECT_TYPE_UNKNOWN);
+ struct object_info_table *tbl = &bp->object_info_tbl;
+ struct display_object_info_table_v1_4 *v1_4 = tbl->v1_4;
+
+ if (v1_4->number_of_path > i) {
+ /* If display_objid is a generic object ID, the encoderobjid
+ * and extencoderobjid should be 0.
+ */
+ if (v1_4->display_path[i].encoderobjid != 0 &&
+ v1_4->display_path[i].display_objid != 0)
+ object_id = object_id_from_bios_object_id(
+ v1_4->display_path[i].display_objid);
+ }
+
+ return object_id;
+}
+
+
+/* TODO: GetNumberOfSrc */
+
+static uint32_t bios_parser_get_dst_number(struct dc_bios *dcb,
+ struct graphics_object_id id)
+{
+ /* connector has 1 Dest, encoder has 0 Dest */
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ return 0;
+ case OBJECT_TYPE_CONNECTOR:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* removed getSrcObjList, getDestObjList */
+
+
+static enum bp_result bios_parser_get_src_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ unsigned int i;
+ enum bp_result bp_result = BP_RESULT_BADINPUT;
+ struct graphics_object_id obj_id = {0};
+ struct object_info_table *tbl = &bp->object_info_tbl;
+
+ if (!src_object_id)
+ return bp_result;
+
+ switch (object_id.type) {
+ /* The encoder's source is the GPU. The BIOS does not provide a GPU
+ * object, since all display paths point to the same GPU (0x1100), so
+ * the GPU object type is hardcoded.
+ */
+ case OBJECT_TYPE_ENCODER:
+ /* TODO: since the number of sources must be less than 2,
+ * break out of the loop once a match is found.
+ * The DAL2 implementation may need the same change.
+ */
+ for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].encoderobjid);
+ if (object_id.type == obj_id.type &&
+ object_id.id == obj_id.id &&
+ object_id.enum_id ==
+ obj_id.enum_id) {
+ *src_object_id =
+ object_id_from_bios_object_id(0x1100);
+ /* break; */
+ }
+ }
+ bp_result = BP_RESULT_OK;
+ break;
+ case OBJECT_TYPE_CONNECTOR:
+ for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].display_objid);
+
+ if (object_id.type == obj_id.type &&
+ object_id.id == obj_id.id &&
+ object_id.enum_id == obj_id.enum_id) {
+ *src_object_id =
+ object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].encoderobjid);
+ /* break; */
+ }
+ }
+ bp_result = BP_RESULT_OK;
+ break;
+ default:
+ break;
+ }
+
+ return bp_result;
+}
+
+static enum bp_result bios_parser_get_dst_obj(struct dc_bios *dcb,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *dest_object_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ unsigned int i;
+ enum bp_result bp_result = BP_RESULT_BADINPUT;
+ struct graphics_object_id obj_id = {0};
+ struct object_info_table *tbl = &bp->object_info_tbl;
+
+ if (!dest_object_id)
+ return BP_RESULT_BADINPUT;
+
+ switch (object_id.type) {
+ case OBJECT_TYPE_ENCODER:
+ /* TODO: since the number of sources must be less than 2,
+ * break out of the loop once a match is found.
+ * The DAL2 implementation may need the same change.
+ */
+ for (i = 0; i < tbl->v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].encoderobjid);
+ if (object_id.type == obj_id.type &&
+ object_id.id == obj_id.id &&
+ object_id.enum_id ==
+ obj_id.enum_id) {
+ *dest_object_id =
+ object_id_from_bios_object_id(
+ tbl->v1_4->display_path[i].display_objid);
+ /* break; */
+ }
+ }
+ bp_result = BP_RESULT_OK;
+ break;
+ default:
+ break;
+ }
+
+ return bp_result;
+}
+
+
+/* from graphics_object_id, find display path which includes the object_id */
+static struct atom_display_object_path_v2 *get_bios_object(
+ struct bios_parser *bp,
+ struct graphics_object_id id)
+{
+ unsigned int i;
+ struct graphics_object_id obj_id = {0};
+
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ bp->object_info_tbl.v1_4->display_path[i].encoderobjid);
+ if (id.type == obj_id.type &&
+ id.id == obj_id.id &&
+ id.enum_id == obj_id.enum_id)
+ return
+ &bp->object_info_tbl.v1_4->display_path[i];
+ }
+ case OBJECT_TYPE_CONNECTOR:
+ case OBJECT_TYPE_GENERIC:
+ /* Both generic and connector object IDs
+ * are stored in display_objid.
+ */
+ for (i = 0; i < bp->object_info_tbl.v1_4->number_of_path; i++) {
+ obj_id = object_id_from_bios_object_id(
+ bp->object_info_tbl.v1_4->display_path[i].display_objid
+ );
+ if (id.type == obj_id.type &&
+ id.id == obj_id.id &&
+ id.enum_id == obj_id.enum_id)
+ return
+ &bp->object_info_tbl.v1_4->display_path[i];
+ }
+ default:
+ return NULL;
+ }
+}
+
+static enum bp_result bios_parser_get_i2c_info(struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info)
+{
+ uint32_t offset;
+ struct atom_display_object_path_v2 *object;
+ struct atom_common_record_header *header;
+ struct atom_i2c_record *record;
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ offset = object->disp_recordoffset + bp->object_info_tbl_offset;
+
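+ /* Iterate the record list until the LAST_RECORD_TYPE terminator or a zero-size record. */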
+ for (;;) {
+ header = GET_IMAGE(struct atom_common_record_header, offset);
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (header->record_type == LAST_RECORD_TYPE ||
+ !header->record_size)
+ break;
+
+ if (header->record_type == ATOM_I2C_RECORD_TYPE
+ && sizeof(struct atom_i2c_record) <=
+ header->record_size) {
+ /* get the I2C info */
+ record = (struct atom_i2c_record *) header;
+
+ if (get_gpio_i2c_info(bp, record, info) ==
+ BP_RESULT_OK)
+ return BP_RESULT_OK;
+ }
+
+ offset += header->record_size;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static enum bp_result get_gpio_i2c_info(
+ struct bios_parser *bp,
+ struct atom_i2c_record *record,
+ struct graphics_object_i2c_info *info)
+{
+ struct atom_gpio_pin_lut_v2_1 *header;
+ uint32_t count = 0;
+ unsigned int table_index = 0;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* get the GPIO_I2C info */
+ if (!DATA_TABLES(gpio_pin_lut))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1,
+ DATA_TABLES(gpio_pin_lut));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(struct atom_common_table_header) +
+ sizeof(struct atom_gpio_pin_assignment) >
+ le16_to_cpu(header->table_header.structuresize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* TODO: handle other content revisions? */
+ if (header->table_header.content_revision != 1)
+ return BP_RESULT_UNSUPPORTED;
+
+ /* get data count */
+ count = (le16_to_cpu(header->table_header.structuresize)
+ - sizeof(struct atom_common_table_header))
+ / sizeof(struct atom_gpio_pin_assignment);
+
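+ /* The lane-mux bits of i2c_id index directly into the GPIO pin LUT. */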
+ table_index = record->i2c_id & I2C_HW_LANE_MUX;
+
+ if (count < table_index) {
+ bool find_valid = false;
+
+ for (table_index = 0; table_index < count; table_index++) {
+ if (((record->i2c_id & I2C_HW_CAP) == (
+ header->gpio_pin[table_index].gpio_id &
+ I2C_HW_CAP)) &&
+ ((record->i2c_id & I2C_HW_ENGINE_ID_MASK) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_ENGINE_ID_MASK)) &&
+ ((record->i2c_id & I2C_HW_LANE_MUX) ==
+ (header->gpio_pin[table_index].gpio_id &
+ I2C_HW_LANE_MUX))) {
+ /* still valid */
+ find_valid = true;
+ break;
+ }
+ }
+ /* If we don't find the entry we are looking for,
+ * return BP_RESULT_BADBIOSTABLE.
+ */
+ if (find_valid == false)
+ return BP_RESULT_BADBIOSTABLE;
+ }
+
+ /* get the GPIO_I2C_INFO */
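+ /* i2c_id layout (see struct i2c_id_config_access): bits [3:0] lane mux,
+ * bits [6:4] HW engine ID, bit 7 HW-capable flag. */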
+ info->i2c_hw_assist = (record->i2c_id & I2C_HW_CAP) ? true : false;
+ info->i2c_line = record->i2c_id & I2C_HW_LANE_MUX;
+ info->i2c_engine_id = (record->i2c_id & I2C_HW_ENGINE_ID_MASK) >> 4;
+ info->i2c_slave_address = record->i2c_slave_addr;
+
+ /* TODO: check how to get register offset for en, Y, etc. */
+ info->gpio_info.clk_a_register_index =
+ le16_to_cpu(
+ header->gpio_pin[table_index].data_a_reg_index);
+ info->gpio_info.clk_a_shift =
+ header->gpio_pin[table_index].gpio_bitshift;
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_voltage_ddc_info_v4(
+ uint8_t *i2c_line,
+ uint32_t index,
+ struct atom_common_table_header *header,
+ uint8_t *address)
+{
+ enum bp_result result = BP_RESULT_NORECORD;
+ struct atom_voltage_objects_info_v4_1 *info =
+ (struct atom_voltage_objects_info_v4_1 *) address;
+
+ uint8_t *voltage_current_object =
+ (uint8_t *) (&(info->voltage_object[0]));
+
+ while ((address + le16_to_cpu(header->structuresize)) >
+ voltage_current_object) {
+ struct atom_i2c_voltage_object_v4 *object =
+ (struct atom_i2c_voltage_object_v4 *)
+ voltage_current_object;
+
+ if (object->header.voltage_mode ==
+ ATOM_INIT_VOLTAGE_REGULATOR) {
+ if (object->header.voltage_type == index) {
+ *i2c_line = object->i2c_id ^ 0x90;
+ result = BP_RESULT_OK;
+ break;
+ }
+ }
+
+ voltage_current_object +=
+ le16_to_cpu(object->header.object_size);
+ }
+ return result;
+}
+
+static enum bp_result bios_parser_get_thermal_ddc_info(
+ struct dc_bios *dcb,
+ uint32_t i2c_channel_id,
+ struct graphics_object_i2c_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct i2c_id_config_access *config;
+ struct atom_i2c_record record;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ config = (struct i2c_id_config_access *) &i2c_channel_id;
+
+ record.i2c_id = config->bfHW_Capable;
+ record.i2c_id |= config->bfI2C_LineMux;
+ record.i2c_id |= config->bfHW_EngineID;
+
+ return get_gpio_i2c_info(bp, &record, info);
+}
+
+static enum bp_result bios_parser_get_voltage_ddc_info(struct dc_bios *dcb,
+ uint32_t index,
+ struct graphics_object_i2c_info *info)
+{
+ uint8_t i2c_line = 0;
+ enum bp_result result = BP_RESULT_NORECORD;
+ uint8_t *voltage_info_address;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision = {0};
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!DATA_TABLES(voltageobject_info))
+ return result;
+
+ voltage_info_address = bios_get_image(&bp->base,
+ DATA_TABLES(voltageobject_info),
+ sizeof(struct atom_common_table_header));
+
+ header = (struct atom_common_table_header *) voltage_info_address;
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 4:
+ if (revision.minor != 1)
+ break;
+ result = get_voltage_ddc_info_v4(&i2c_line, index, header,
+ voltage_info_address);
+ break;
+ }
+
+ if (result == BP_RESULT_OK)
+ result = bios_parser_get_thermal_ddc_info(dcb,
+ i2c_line, info);
+
+ return result;
+}
+
+static enum bp_result bios_parser_get_hpd_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_display_object_path_v2 *object;
+ struct atom_hpd_int_record *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_hpd_record(bp, object);
+
+ if (record != NULL) {
+ info->hpd_int_gpio_uid = record->pin_id;
+ info->hpd_active = record->plugin_pin_state;
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static struct atom_hpd_int_record *get_hpd_record(
+ struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object)
+{
+ struct atom_common_record_header *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = le16_to_cpu(object->disp_recordoffset)
+ + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(struct atom_common_record_header, offset);
+
+ if (!header)
+ return NULL;
+
+ if (header->record_type == LAST_RECORD_TYPE ||
+ !header->record_size)
+ break;
+
+ if (header->record_type == ATOM_HPD_INT_RECORD_TYPE
+ && sizeof(struct atom_hpd_int_record) <=
+ header->record_size)
+ return (struct atom_hpd_int_record *) header;
+
+ offset += header->record_size;
+ }
+
+ return NULL;
+}
+
+/**
+ * bios_parser_get_gpio_pin_info
+ * Get GpioPin information of input gpio id
+ *
+ * @param gpio_id, GPIO ID
+ * @param info, GpioPin information structure
+ * @return Bios parser result code
+ * @note
+ * to get the GPIO PIN INFO, we need:
+ * 1. get the GPIO_ID from other object table, see GetHPDInfo()
+ * 2. in DATA_TABLE.GPIO_Pin_LUT, search all records,
+ * to get the registerA offset/mask
+ */
+static enum bp_result bios_parser_get_gpio_pin_info(
+ struct dc_bios *dcb,
+ uint32_t gpio_id,
+ struct gpio_pin_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_gpio_pin_lut_v2_1 *header;
+ uint32_t count = 0;
+ uint32_t i = 0;
+
+ if (!DATA_TABLES(gpio_pin_lut))
+ return BP_RESULT_BADBIOSTABLE;
+
+ header = GET_IMAGE(struct atom_gpio_pin_lut_v2_1,
+ DATA_TABLES(gpio_pin_lut));
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (sizeof(struct atom_common_table_header) +
+ sizeof(struct atom_gpio_pin_lut_v2_1)
+ > le16_to_cpu(header->table_header.structuresize))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (header->table_header.content_revision != 1)
+ return BP_RESULT_UNSUPPORTED;
+
+ /* Temporarily hard-code GPIO pin info */
+#if defined(FOR_SIMNOW_BOOT)
+ {
+ struct atom_gpio_pin_assignment gpio_pin[8] = {
+ {0x5db5, 0, 0, 1, 0},
+ {0x5db5, 8, 8, 2, 0},
+ {0x5db5, 0x10, 0x10, 3, 0},
+ {0x5db5, 0x18, 0x14, 4, 0},
+ {0x5db5, 0x1A, 0x18, 5, 0},
+ {0x5db5, 0x1C, 0x1C, 6, 0},
+ };
+
+ count = 6;
+ memmove(header->gpio_pin, gpio_pin, sizeof(gpio_pin));
+ }
+#else
+ count = (le16_to_cpu(header->table_header.structuresize)
+ - sizeof(struct atom_common_table_header))
+ / sizeof(struct atom_gpio_pin_assignment);
+#endif
+ for (i = 0; i < count; ++i) {
+ if (header->gpio_pin[i].gpio_id != gpio_id)
+ continue;
+
+ info->offset =
+ (uint32_t) le16_to_cpu(
+ header->gpio_pin[i].data_a_reg_index);
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask = (uint32_t) (1 <<
+ header->gpio_pin[i].gpio_bitshift);
+ info->mask_y = info->mask + 2;
+ info->mask_en = info->mask + 1;
+ info->mask_mask = info->mask - 1;
+
+ return BP_RESULT_OK;
+ }
+
+ return BP_RESULT_NORECORD;
+}
+
+static struct device_id device_type_from_device_id(uint16_t device_id)
+{
+
+ struct device_id result_device_id;
+
+ result_device_id.raw_device_tag = device_id;
+
+ switch (device_id) {
+ case ATOM_DISPLAY_LCD1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_LCD;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DISPLAY_DFP1_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 1;
+ break;
+
+ case ATOM_DISPLAY_DFP2_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 2;
+ break;
+
+ case ATOM_DISPLAY_DFP3_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 3;
+ break;
+
+ case ATOM_DISPLAY_DFP4_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 4;
+ break;
+
+ case ATOM_DISPLAY_DFP5_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 5;
+ break;
+
+ case ATOM_DISPLAY_DFP6_SUPPORT:
+ result_device_id.device_type = DEVICE_TYPE_DFP;
+ result_device_id.enum_id = 6;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER(); /* Invalid device Id */
+ result_device_id.device_type = DEVICE_TYPE_UNKNOWN;
+ result_device_id.enum_id = 0;
+ }
+ return result_device_id;
+}
+
+static enum bp_result bios_parser_get_device_tag(
+ struct dc_bios *dcb,
+ struct graphics_object_id connector_object_id,
+ uint32_t device_tag_index,
+ struct connector_device_tag_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_display_object_path_v2 *object;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ /* getBiosObject will return MXM object */
+ object = get_bios_object(bp, connector_object_id);
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object id */
+ return BP_RESULT_BADINPUT;
+ }
+
+ info->acpi_device = 0; /* BIOS no longer provides this */
+ info->dev_id = device_type_from_device_id(object->device_tag);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result get_ss_info_v4_1(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL;
+
+ if (!ss_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ ss_info->type.STEP_AND_DELAY_INFO = false;
+ ss_info->spread_percentage_divider = 1000;
+ /* BIOS no longer uses target clock. Always enable for now */
+ ss_info->target_clock_range = 0xffffffff;
+
+ switch (id) {
+ case AS_SIGNAL_TYPE_DVI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dvi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dvi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_HDMI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->hdmi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ /* TODO: LVDS not supported anymore? */
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dp_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dp_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+ /* atom_firmware: DAL only gets data from the dce_info table.
+ * If data within smu_info is needed by DAL, the VBIOS should
+ * copy it into dce_info.
+ */
+ result = BP_RESULT_UNSUPPORTED;
+ break;
+ default:
+ result = BP_RESULT_UNSUPPORTED;
+ }
+
+ return result;
+}
+
+static enum bp_result get_ss_info_v4_2(
+ struct bios_parser *bp,
+ uint32_t id,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ enum bp_result result = BP_RESULT_OK;
+ struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL;
+ struct atom_smu_info_v3_1 *smu_info = NULL;
+
+ if (!ss_info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ if (!DATA_TABLES(smu_info))
+ return BP_RESULT_BADBIOSTABLE;
+
+ disp_cntl_tbl = GET_IMAGE(struct atom_display_controller_info_v4_2,
+ DATA_TABLES(dce_info));
+ if (!disp_cntl_tbl)
+ return BP_RESULT_BADBIOSTABLE;
+
+ smu_info = GET_IMAGE(struct atom_smu_info_v3_1, DATA_TABLES(smu_info));
+ if (!smu_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ ss_info->type.STEP_AND_DELAY_INFO = false;
+ ss_info->spread_percentage_divider = 1000;
+ /* BIOS no longer uses target clock. Always enable for now */
+ ss_info->target_clock_range = 0xffffffff;
+
+ switch (id) {
+ case AS_SIGNAL_TYPE_DVI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->dvi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->dvi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->dvi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_HDMI:
+ ss_info->spread_spectrum_percentage =
+ disp_cntl_tbl->hdmi_ss_percentage;
+ ss_info->spread_spectrum_range =
+ disp_cntl_tbl->hdmi_ss_rate_10hz * 10;
+ if (disp_cntl_tbl->hdmi_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ /* TODO: LVDS not supported anymore? */
+ case AS_SIGNAL_TYPE_DISPLAY_PORT:
+ ss_info->spread_spectrum_percentage =
+ smu_info->gpuclk_ss_percentage;
+ ss_info->spread_spectrum_range =
+ smu_info->gpuclk_ss_rate_10hz * 10;
+ if (smu_info->gpuclk_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
+ ss_info->type.CENTER_MODE = true;
+ break;
+ case AS_SIGNAL_TYPE_GPU_PLL:
+ /* atom_firmware: DAL only gets data from the dce_info table.
+ * If data within smu_info is needed by DAL, the VBIOS should
+ * copy it into dce_info.
+ */
+ result = BP_RESULT_UNSUPPORTED;
+ break;
+ default:
+ result = BP_RESULT_UNSUPPORTED;
+ }
+
+ return result;
+}
+
+/**
+ * bios_parser_get_spread_spectrum_info
+ * Get spread spectrum information from the ASIC_InternalSS_Info (ver 2.1 or
+ * ver 3.1) or SS_Info table from the VBIOS. Currently ASIC_InternalSS_Info
+ * ver 2.1 can co-exist with the SS_Info table. Except for ASIC_InternalSS_Info
+ * ver 3.1, there is only one entry for each signal/ss id. However, there is
+ * no plan to support multiple spread spectrum entries for EverGreen.
+ * @param [in] dcb, dc_bios instance
+ * @param [in] signal, ASSignalType to be converted to info index
+ * @param [in] index, number of entries that match the converted info index
+ * @param [out] ss_info, spread spectrum information structure
+ * @return Bios parser result code
+ */
+static enum bp_result bios_parser_get_spread_spectrum_info(
+ struct dc_bios *dcb,
+ enum as_signal_type signal,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_UNSUPPORTED;
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!ss_info) /* check for bad input */
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(dce_info))
+ return BP_RESULT_UNSUPPORTED;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+ get_atom_data_table_revision(header, &tbl_revision);
+
+ switch (tbl_revision.major) {
+ case 4:
+ switch (tbl_revision.minor) {
+ case 1:
+ return get_ss_info_v4_1(bp, signal, index, ss_info);
+ case 2:
+ return get_ss_info_v4_2(bp, signal, index, ss_info);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ /* there cannot be more than one entry in the SS Info table */
+ return result;
+}
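+
+/*
+ * Illustrative usage sketch (hypothetical, not taken from this change): a
+ * clock-source implementation would typically query the DisplayPort spread
+ * spectrum parameters through the dc_bios interface, roughly:
+ *
+ *	struct spread_spectrum_info ss_info = {0};
+ *
+ *	if (dcb->funcs->get_spread_spectrum_info(dcb,
+ *			AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &ss_info) ==
+ *			BP_RESULT_OK)
+ *		apply_ss(ss_info.spread_spectrum_percentage,
+ *			 ss_info.spread_spectrum_range);
+ *
+ * apply_ss() is a placeholder for the caller's own programming step.
+ */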
+
+static enum bp_result get_embedded_panel_info_v2_1(
+ struct bios_parser *bp,
+ struct embedded_panel_info *info)
+{
+ struct lcd_info_v2_1 *lvds;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ if (!DATA_TABLES(lcd_info))
+ return BP_RESULT_UNSUPPORTED;
+
+ lvds = GET_IMAGE(struct lcd_info_v2_1, DATA_TABLES(lcd_info));
+
+ if (!lvds)
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* TODO: previously checked against v1_3, should be v2_1 */
+ if (!((lvds->table_header.format_revision == 2)
+ && (lvds->table_header.content_revision >= 1)))
+ return BP_RESULT_UNSUPPORTED;
+
+ memset(info, 0, sizeof(struct embedded_panel_info));
+
+ /* We need to convert from 10KHz units into KHz units */
+ info->lcd_timing.pixel_clk =
+ le16_to_cpu(lvds->lcd_timing.pixclk) * 10;
+ /* usHActive does not include borders, according to VBIOS team */
+ info->lcd_timing.horizontal_addressable =
+ le16_to_cpu(lvds->lcd_timing.h_active);
+ /* usHBlanking_Time includes borders, so we should really be
+ * subtracting borders during this translation, but LVDS generally
+ * doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders
+ */
+ info->lcd_timing.horizontal_blanking_time =
+ le16_to_cpu(lvds->lcd_timing.h_blanking_time);
+ /* usVActive does not include borders, according to VBIOS team */
+ info->lcd_timing.vertical_addressable =
+ le16_to_cpu(lvds->lcd_timing.v_active);
+ /* usVBlanking_Time includes borders, so we should really be
+ * subtracting borders during this translation, but LVDS generally
+ * doesn't have borders, so we should be okay leaving this as is for
+ * now. May need to revisit if we ever have LVDS with borders
+ */
+ info->lcd_timing.vertical_blanking_time =
+ le16_to_cpu(lvds->lcd_timing.v_blanking_time);
+ info->lcd_timing.horizontal_sync_offset =
+ le16_to_cpu(lvds->lcd_timing.h_sync_offset);
+ info->lcd_timing.horizontal_sync_width =
+ le16_to_cpu(lvds->lcd_timing.h_sync_width);
+ info->lcd_timing.vertical_sync_offset =
+ le16_to_cpu(lvds->lcd_timing.v_sync_offset);
+ info->lcd_timing.vertical_sync_width =
+ le16_to_cpu(lvds->lcd_timing.v_syncwidth);
+ info->lcd_timing.horizontal_border = lvds->lcd_timing.h_border;
+ info->lcd_timing.vertical_border = lvds->lcd_timing.v_border;
+
+ /* not provided by VBIOS */
+ info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;
+
+ info->lcd_timing.misc_info.H_SYNC_POLARITY =
+ ~(uint32_t)
+ (lvds->lcd_timing.miscinfo & ATOM_HSYNC_POLARITY);
+ info->lcd_timing.misc_info.V_SYNC_POLARITY =
+ ~(uint32_t)
+ (lvds->lcd_timing.miscinfo & ATOM_VSYNC_POLARITY);
+
+ /* not provided by VBIOS */
+ info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
+
+ info->lcd_timing.misc_info.H_REPLICATION_BY2 =
+ !!(lvds->lcd_timing.miscinfo & ATOM_H_REPLICATIONBY2);
+ info->lcd_timing.misc_info.V_REPLICATION_BY2 =
+ !!(lvds->lcd_timing.miscinfo & ATOM_V_REPLICATIONBY2);
+ info->lcd_timing.misc_info.COMPOSITE_SYNC =
+ !!(lvds->lcd_timing.miscinfo & ATOM_COMPOSITESYNC);
+ info->lcd_timing.misc_info.INTERLACE =
+ !!(lvds->lcd_timing.miscinfo & ATOM_INTERLACE);
+
+ /* not provided by VBIOS*/
+ info->lcd_timing.misc_info.DOUBLE_CLOCK = 0;
+ /* not provided by VBIOS*/
+ info->ss_id = 0;
+
+ info->realtek_eDPToLVDS =
+ !!(lvds->dplvdsrxid == eDP_TO_LVDS_REALTEK_ID);
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_embedded_panel_info(
+ struct dc_bios *dcb,
+ struct embedded_panel_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_common_table_header *header;
+ struct atom_data_revision tbl_revision;
+
+ if (!DATA_TABLES(lcd_info))
+ return BP_RESULT_FAILURE;
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(lcd_info));
+
+ if (!header)
+ return BP_RESULT_BADBIOSTABLE;
+
+ get_atom_data_table_revision(header, &tbl_revision);
+
+
+ switch (tbl_revision.major) {
+ case 2:
+ switch (tbl_revision.minor) {
+ case 1:
+ return get_embedded_panel_info_v2_1(bp, info);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ return BP_RESULT_FAILURE;
+}
+
+static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+{
+ enum dal_device_type device_type = device_id.device_type;
+ uint32_t enum_id = device_id.enum_id;
+
+ switch (device_type) {
+ case DEVICE_TYPE_LCD:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DISPLAY_LCD1_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ case DEVICE_TYPE_DFP:
+ switch (enum_id) {
+ case 1:
+ return ATOM_DISPLAY_DFP1_SUPPORT;
+ case 2:
+ return ATOM_DISPLAY_DFP2_SUPPORT;
+ case 3:
+ return ATOM_DISPLAY_DFP3_SUPPORT;
+ case 4:
+ return ATOM_DISPLAY_DFP4_SUPPORT;
+ case 5:
+ return ATOM_DISPLAY_DFP5_SUPPORT;
+ case 6:
+ return ATOM_DISPLAY_DFP6_SUPPORT;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* Unidentified device ID, return empty support mask. */
+ return 0;
+}
+
+static bool bios_parser_is_device_id_supported(
+ struct dc_bios *dcb,
+ struct device_id id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ uint32_t mask = get_support_mask_for_device_id(id);
+
+ return (le16_to_cpu(bp->object_info_tbl.v1_4->supporteddevices) &
+ mask) != 0;
+}
+
+static void bios_parser_post_init(
+ struct dc_bios *dcb)
+{
+ /* TODO for OPM module. Needs to be implemented later */
+}
+
+static uint32_t bios_parser_get_ss_entry_number(
+ struct dc_bios *dcb,
+ enum as_signal_type signal)
+{
+ /* TODO: DAL2 atomfirmware implementation does not need this.
+ * why does DAL3 need this?
+ */
+ return 1;
+}
+
+static enum bp_result bios_parser_transmitter_control(
+ struct dc_bios *dcb,
+ struct bp_transmitter_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.transmitter_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.transmitter_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_encoder_control(
+ struct dc_bios *dcb,
+ struct bp_encoder_control *cntl)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.dig_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dig_encoder_control(bp, cntl);
+}
+
+static enum bp_result bios_parser_set_pixel_clock(
+ struct dc_bios *dcb,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_pixel_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_pixel_clock(bp, bp_params);
+}
+
+static enum bp_result bios_parser_set_dce_clock(
+ struct dc_bios *dcb,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_dce_clock)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_dce_clock(bp, bp_params);
+}
+
+static unsigned int bios_parser_get_smu_clock_info(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.get_smu_clock_info)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.get_smu_clock_info(bp);
+}
+
+static enum bp_result bios_parser_program_crtc_timing(
+ struct dc_bios *dcb,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.set_crtc_timing)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.set_crtc_timing(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_crtc(
+ struct dc_bios *dcb,
+ enum controller_id id,
+ bool enable)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_crtc)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_crtc(bp, id, enable);
+}
+
+static enum bp_result bios_parser_crtc_source_select(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
+static enum bp_result bios_parser_enable_disp_power_gating(
+ struct dc_bios *dcb,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.enable_disp_power_gating)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.enable_disp_power_gating(bp, controller_id,
+ action);
+}
+
+static bool bios_parser_is_accelerated_mode(
+ struct dc_bios *dcb)
+{
+ return bios_is_accelerated_mode(dcb);
+}
+
+
+/**
+ * bios_parser_set_scratch_critical_state
+ *
+ * @brief
+ * update critical state bit in VBIOS scratch register
+ *
+ * @param
+ * state - true to set the critical state bit, false to reset it
+ */
+static void bios_parser_set_scratch_critical_state(
+ struct dc_bios *dcb,
+ bool state)
+{
+ bios_set_scratch_critical_state(dcb, state);
+}
+
+static enum bp_result bios_parser_get_firmware_info(
+ struct dc_bios *dcb,
+ struct dc_firmware_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+ struct atom_common_table_header *header;
+
+ struct atom_data_revision revision;
+
+ if (info && DATA_TABLES(firmwareinfo)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(firmwareinfo));
+ get_atom_data_table_revision(header, &revision);
+ switch (revision.major) {
+ case 3:
+ switch (revision.minor) {
+ case 1:
+ result = get_firmware_info_v3_1(bp, info);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return result;
+}
+
+static enum bp_result get_firmware_info_v3_1(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ struct atom_firmware_info_v3_1 *firmware_info;
+ struct atom_display_controller_info_v4_1 *dce_info = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_1,
+ DATA_TABLES(firmwareinfo));
+
+ dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+
+ if (!firmware_info || !dce_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ /* Pixel clock pll information. */
+ /* We need to convert from 10KHz units into KHz units */
+ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+ info->default_engine_clk = firmware_info->bootup_sclk_in10khz * 10;
+
+ /* 27MHz for Vega10: */
+ info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10;
+
+ /* Hardcode frequency if BIOS gives no DCE Ref Clk */
+ if (info->pll_info.crystal_frequency == 0)
+ info->pll_info.crystal_frequency = 27000;
+ /* dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it */
+ info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10;
+ info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10;
+
+ /* Get GPU PLL VCO Clock */
+
+ if (bp->cmd_tbl.get_smu_clock_info != NULL) {
+ /* VBIOS gives in 10KHz */
+ info->smu_gpu_pll_output_freq =
+ bp->cmd_tbl.get_smu_clock_info(bp) * 10;
+ }
+
+ return BP_RESULT_OK;
+}
+
+static enum bp_result bios_parser_get_encoder_cap_info(
+ struct dc_bios *dcb,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct atom_display_object_path_v2 *object;
+ struct atom_encoder_caps_record *record = NULL;
+
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ object = get_bios_object(bp, object_id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_encoder_cap_record(bp, object);
+ if (!record)
+ return BP_RESULT_NORECORD;
+
+ info->DP_HBR2_CAP = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HBR2) ? 1 : 0;
+ info->DP_HBR2_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HBR2_EN) ? 1 : 0;
+ info->DP_HBR3_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
+ info->HDMI_6GB_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
+
+ return BP_RESULT_OK;
+}
+
+
+static struct atom_encoder_caps_record *get_encoder_cap_record(
+ struct bios_parser *bp,
+ struct atom_display_object_path_v2 *object)
+{
+ struct atom_common_record_header *header;
+ uint32_t offset;
+
+ if (!object) {
+ BREAK_TO_DEBUGGER(); /* Invalid object */
+ return NULL;
+ }
+
+ offset = object->encoder_recordoffset + bp->object_info_tbl_offset;
+
+ for (;;) {
+ header = GET_IMAGE(struct atom_common_record_header, offset);
+
+ if (!header)
+ return NULL;
+
+ offset += header->record_size;
+
+ if (header->record_type == LAST_RECORD_TYPE ||
+ !header->record_size)
+ break;
+
+ if (header->record_type != ATOM_ENCODER_CAP_RECORD_TYPE)
+ continue;
+
+ if (sizeof(struct atom_encoder_caps_record) <=
+ header->record_size)
+ return (struct atom_encoder_caps_record *)header;
+ }
+
+ return NULL;
+}
+
+/*
+ * get_integrated_info_v11
+ *
+ * @brief
+ * Get v1.11 integrated BIOS information
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handle used to access the master data table
+ * integrated_info *info - [out] integrated info structure to fill
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result get_integrated_info_v11(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ struct atom_integrated_system_info_v1_11 *info_v11;
+ uint32_t i;
+
+ info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11,
+ DATA_TABLES(integratedsysteminfo));
+
+ if (info_v11 == NULL)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->gpu_cap_info =
+ le32_to_cpu(info_v11->gpucapinfo);
+ /*
+ * system_config: Bit[0] = 0 : PCIE power gating disabled
+ * = 1 : PCIE power gating enabled
+ * Bit[1] = 0 : DDR-PLL shut down disabled
+ * = 1 : DDR-PLL shut down enabled
+ * Bit[2] = 0 : DDR-PLL power down disabled
+ * = 1 : DDR-PLL power down enabled
+ */
+ info->system_config = le32_to_cpu(info_v11->system_config);
+ info->cpu_cap_info = le32_to_cpu(info_v11->cpucapinfo);
+ info->memory_type = info_v11->memorytype;
+ info->ma_channel_number = info_v11->umachannelnumber;
+ info->lvds_ss_percentage =
+ le16_to_cpu(info_v11->lvds_ss_percentage);
+ info->lvds_sspread_rate_in_10hz =
+ le16_to_cpu(info_v11->lvds_ss_rate_10hz);
+ info->hdmi_ss_percentage =
+ le16_to_cpu(info_v11->hdmi_ss_percentage);
+ info->hdmi_sspread_rate_in_10hz =
+ le16_to_cpu(info_v11->hdmi_ss_rate_10hz);
+ info->dvi_ss_percentage =
+ le16_to_cpu(info_v11->dvi_ss_percentage);
+ info->dvi_sspread_rate_in_10_hz =
+ le16_to_cpu(info_v11->dvi_ss_rate_10hz);
+ info->lvds_misc = info_v11->lvds_misc;
+ for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
+ info->ext_disp_conn_info.gu_id[i] =
+ info_v11->extdispconninfo.guid[i];
+ }
+
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; ++i) {
+ info->ext_disp_conn_info.path[i].device_connector_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(info_v11->extdispconninfo.path[i].connectorobjid));
+
+ info->ext_disp_conn_info.path[i].ext_encoder_obj_id =
+ object_id_from_bios_object_id(
+ le16_to_cpu(
+ info_v11->extdispconninfo.path[i].ext_encoder_objid));
+
+ info->ext_disp_conn_info.path[i].device_tag =
+ le16_to_cpu(
+ info_v11->extdispconninfo.path[i].device_tag);
+ info->ext_disp_conn_info.path[i].device_acpi_enum =
+ le16_to_cpu(
+ info_v11->extdispconninfo.path[i].device_acpi_enum);
+ info->ext_disp_conn_info.path[i].ext_aux_ddc_lut_index =
+ info_v11->extdispconninfo.path[i].auxddclut_index;
+ info->ext_disp_conn_info.path[i].ext_hpd_pin_lut_index =
+ info_v11->extdispconninfo.path[i].hpdlut_index;
+ info->ext_disp_conn_info.path[i].channel_mapping.raw =
+ info_v11->extdispconninfo.path[i].channelmapping;
+ info->ext_disp_conn_info.path[i].caps =
+ le16_to_cpu(info_v11->extdispconninfo.path[i].caps);
+ }
+ info->ext_disp_conn_info.checksum =
+ info_v11->extdispconninfo.checksum;
+
+ info->dp0_ext_hdmi_slv_addr = info_v11->dp0_retimer_set.HdmiSlvAddr;
+ info->dp0_ext_hdmi_reg_num = info_v11->dp0_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp0_ext_hdmi_reg_num; i++) {
+ info->dp0_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp0_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp0_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp0_ext_hdmi_6g_reg_num = info_v11->dp0_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp0_ext_hdmi_6g_reg_num; i++) {
+ info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp0_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp0_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+ info->dp1_ext_hdmi_slv_addr = info_v11->dp1_retimer_set.HdmiSlvAddr;
+ info->dp1_ext_hdmi_reg_num = info_v11->dp1_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp1_ext_hdmi_reg_num; i++) {
+ info->dp1_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp1_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp1_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp1_ext_hdmi_6g_reg_num = info_v11->dp1_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp1_ext_hdmi_6g_reg_num; i++) {
+ info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp1_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp1_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+ info->dp2_ext_hdmi_slv_addr = info_v11->dp2_retimer_set.HdmiSlvAddr;
+ info->dp2_ext_hdmi_reg_num = info_v11->dp2_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp2_ext_hdmi_reg_num; i++) {
+ info->dp2_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp2_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp2_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp2_ext_hdmi_6g_reg_num = info_v11->dp2_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp2_ext_hdmi_6g_reg_num; i++) {
+ info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp2_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp2_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+ info->dp3_ext_hdmi_slv_addr = info_v11->dp3_retimer_set.HdmiSlvAddr;
+ info->dp3_ext_hdmi_reg_num = info_v11->dp3_retimer_set.HdmiRegNum;
+ for (i = 0; i < info->dp3_ext_hdmi_reg_num; i++) {
+ info->dp3_ext_hdmi_reg_settings[i].i2c_reg_index =
+ info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegIndex;
+ info->dp3_ext_hdmi_reg_settings[i].i2c_reg_val =
+ info_v11->dp3_retimer_set.HdmiRegSetting[i].ucI2cRegVal;
+ }
+ info->dp3_ext_hdmi_6g_reg_num = info_v11->dp3_retimer_set.Hdmi6GRegNum;
+ for (i = 0; i < info->dp3_ext_hdmi_6g_reg_num; i++) {
+ info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_index =
+ info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegIndex;
+ info->dp3_ext_hdmi_6g_reg_settings[i].i2c_reg_val =
+ info_v11->dp3_retimer_set.Hdmi6GhzRegSetting[i].ucI2cRegVal;
+ }
+
+
+ /** TODO - review **/
+ #if 0
+ info->boot_up_engine_clock = le32_to_cpu(info_v11->ulBootUpEngineClock)
+ * 10;
+ info->dentist_vco_freq = le32_to_cpu(info_v11->ulDentistVCOFreq) * 10;
+ info->boot_up_uma_clock = le32_to_cpu(info_v11->ulBootUpUMAClock) * 10;
+
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->disp_clk_voltage[i].max_supported_clk =
+ le32_to_cpu(info_v11->sDISPCLK_Voltage[i].
+ ulMaximumSupportedCLK) * 10;
+ info->disp_clk_voltage[i].voltage_index =
+ le32_to_cpu(info_v11->sDISPCLK_Voltage[i].ulVoltageIndex);
+ }
+
+ info->boot_up_req_display_vector =
+ le32_to_cpu(info_v11->ulBootUpReqDisplayVector);
+ info->boot_up_nb_voltage =
+ le16_to_cpu(info_v11->usBootUpNBVoltage);
+ info->ext_disp_conn_info_offset =
+ le16_to_cpu(info_v11->usExtDispConnInfoOffset);
+ info->gmc_restore_reset_time =
+ le32_to_cpu(info_v11->ulGMCRestoreResetTime);
+ info->minimum_n_clk =
+ le32_to_cpu(info_v11->ulNbpStateNClkFreq[0]);
+ for (i = 1; i < 4; ++i)
+ info->minimum_n_clk =
+ info->minimum_n_clk <
+ le32_to_cpu(info_v11->ulNbpStateNClkFreq[i]) ?
+ info->minimum_n_clk : le32_to_cpu(
+ info_v11->ulNbpStateNClkFreq[i]);
+
+ info->idle_n_clk = le32_to_cpu(info_v11->ulIdleNClk);
+ info->ddr_dll_power_up_time =
+ le32_to_cpu(info_v11->ulDDR_DLL_PowerUpTime);
+ info->ddr_pll_power_up_time =
+ le32_to_cpu(info_v11->ulDDR_PLL_PowerUpTime);
+ info->pcie_clk_ss_type = le16_to_cpu(info_v11->usPCIEClkSSType);
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
+ info->max_lvds_pclk_freq_in_single_link =
+ le16_to_cpu(info_v11->usMaxLVDSPclkFreqInSingleLink);
+ info->lvds_pwr_on_seq_dig_on_to_de_in_4ms =
+ info_v11->ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
+ info->lvds_pwr_on_seq_de_to_vary_bl_in_4ms =
+ info_v11->ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
+ info->lvds_pwr_on_seq_vary_bl_to_blon_in_4ms =
+ info_v11->ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
+ info->lvds_pwr_off_seq_vary_bl_to_de_in4ms =
+ info_v11->ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
+ info->lvds_pwr_off_seq_de_to_dig_on_in4ms =
+ info_v11->ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
+ info->lvds_pwr_off_seq_blon_to_vary_bl_in_4ms =
+ info_v11->ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
+ info->lvds_off_to_on_delay_in_4ms =
+ info_v11->ucLVDSOffToOnDelay_in4Ms;
+ info->lvds_bit_depth_control_val =
+ le32_to_cpu(info_v11->ulLCDBitDepthControlVal);
+
+ for (i = 0; i < NUMBER_OF_AVAILABLE_SCLK; ++i) {
+ /* Convert [10KHz] into [KHz] */
+ info->avail_s_clk[i].supported_s_clk =
+ le32_to_cpu(info_v11->sAvail_SCLK[i].ulSupportedSCLK)
+ * 10;
+ info->avail_s_clk[i].voltage_index =
+ le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageIndex);
+ info->avail_s_clk[i].voltage_id =
+ le16_to_cpu(info_v11->sAvail_SCLK[i].usVoltageID);
+ }
+ #endif /* TODO*/
+
+ return BP_RESULT_OK;
+}
+
+
+/*
+ * construct_integrated_info
+ *
+ * @brief
+ * Get integrated BIOS information based on table revision
+ *
+ * @param
+ * bios_parser *bp - [in] BIOS parser handle used to access the master data table
+ * integrated_info *info - [out] integrated info structure to fill
+ *
+ * @return
+ * enum bp_result - BP_RESULT_OK if information is available,
+ * BP_RESULT_BADBIOSTABLE otherwise.
+ */
+static enum bp_result construct_integrated_info(
+ struct bios_parser *bp,
+ struct integrated_info *info)
+{
+ enum bp_result result = BP_RESULT_BADBIOSTABLE;
+
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+
+ struct clock_voltage_caps temp = {0, 0};
+ uint32_t i;
+ uint32_t j;
+
+ if (info && DATA_TABLES(integratedsysteminfo)) {
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(integratedsysteminfo));
+
+ get_atom_data_table_revision(header, &revision);
+
+ /* Don't need to check major revision as they are all 1 */
+ switch (revision.minor) {
+ case 11:
+ result = get_integrated_info_v11(bp, info);
+ break;
+ default:
+ return result;
+ }
+ }
+
+ if (result != BP_RESULT_OK)
+ return result;
+
+ /* Sort voltage table from low to high */
+ for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ for (j = i; j > 0; --j) {
+ if (info->disp_clk_voltage[j].max_supported_clk <
+ info->disp_clk_voltage[j-1].max_supported_clk
+ ) {
+ /* swap j and j - 1 */
+ temp = info->disp_clk_voltage[j-1];
+ info->disp_clk_voltage[j-1] =
+ info->disp_clk_voltage[j];
+ info->disp_clk_voltage[j] = temp;
+ }
+ }
+ }
+
+ return result;
+}
+
+static struct integrated_info *bios_parser_create_integrated_info(
+ struct dc_bios *dcb)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct integrated_info *info = NULL;
+
+ info = kzalloc(sizeof(struct integrated_info), GFP_KERNEL);
+
+ if (info == NULL) {
+ ASSERT_CRITICAL(0);
+ return NULL;
+ }
+
+ if (construct_integrated_info(bp, info) == BP_RESULT_OK)
+ return info;
+
+ kfree(info);
+
+ return NULL;
+}
+
+static const struct dc_vbios_funcs vbios_funcs = {
+ .get_connectors_number = bios_parser_get_connectors_number,
+
+ .get_encoder_id = bios_parser_get_encoder_id,
+
+ .get_connector_id = bios_parser_get_connector_id,
+
+ .get_dst_number = bios_parser_get_dst_number,
+
+ .get_src_obj = bios_parser_get_src_obj,
+
+ .get_dst_obj = bios_parser_get_dst_obj,
+
+ .get_i2c_info = bios_parser_get_i2c_info,
+
+ .get_voltage_ddc_info = bios_parser_get_voltage_ddc_info,
+
+ .get_thermal_ddc_info = bios_parser_get_thermal_ddc_info,
+
+ .get_hpd_info = bios_parser_get_hpd_info,
+
+ .get_device_tag = bios_parser_get_device_tag,
+
+ .get_firmware_info = bios_parser_get_firmware_info,
+
+ .get_spread_spectrum_info = bios_parser_get_spread_spectrum_info,
+
+ .get_ss_entry_number = bios_parser_get_ss_entry_number,
+
+ .get_embedded_panel_info = bios_parser_get_embedded_panel_info,
+
+ .get_gpio_pin_info = bios_parser_get_gpio_pin_info,
+
+ .get_encoder_cap_info = bios_parser_get_encoder_cap_info,
+
+ .is_device_id_supported = bios_parser_is_device_id_supported,
+
+
+
+ .is_accelerated_mode = bios_parser_is_accelerated_mode,
+
+ .set_scratch_critical_state = bios_parser_set_scratch_critical_state,
+
+
+/* COMMANDS */
+ .encoder_control = bios_parser_encoder_control,
+
+ .transmitter_control = bios_parser_transmitter_control,
+
+ .enable_crtc = bios_parser_enable_crtc,
+
+ .set_pixel_clock = bios_parser_set_pixel_clock,
+
+ .set_dce_clock = bios_parser_set_dce_clock,
+
+ .program_crtc_timing = bios_parser_program_crtc_timing,
+
+ /* .blank_crtc = bios_parser_blank_crtc, */
+
+ .crtc_source_select = bios_parser_crtc_source_select,
+
+ /* .external_encoder_control = bios_parser_external_encoder_control, */
+
+ .enable_disp_power_gating = bios_parser_enable_disp_power_gating,
+
+ .post_init = bios_parser_post_init,
+
+ .bios_parser_destroy = firmware_parser_destroy,
+
+ .get_smu_clock_info = bios_parser_get_smu_clock_info,
+};
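+
+/*
+ * Illustrative dispatch sketch (hypothetical, not taken from this change):
+ * DC components call these entry points through the dc_bios vtable rather
+ * than calling the parser directly, e.g.:
+ *
+ *	struct dc_firmware_info fw_info;
+ *
+ *	if (dcb->funcs->get_firmware_info(dcb, &fw_info) == BP_RESULT_OK)
+ *		use_clock(fw_info.pll_info.crystal_frequency);
+ *
+ * use_clock() is a placeholder for whatever the caller does with the value.
+ */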
+
+static bool bios_parser_construct(
+ struct bios_parser *bp,
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ uint16_t *rom_header_offset = NULL;
+ struct atom_rom_header_v2_2 *rom_header = NULL;
+ struct display_object_info_table_v1_4 *object_info_tbl;
+ struct atom_data_revision tbl_rev = {0};
+
+ if (!init)
+ return false;
+
+ if (!init->bios)
+ return false;
+
+ bp->base.funcs = &vbios_funcs;
+ bp->base.bios = init->bios;
+ bp->base.bios_size = bp->base.bios[OFFSET_TO_ATOM_ROM_IMAGE_SIZE] * BIOS_IMAGE_SIZE_UNIT;
+
+ bp->base.ctx = init->ctx;
+
+ bp->base.bios_local_image = NULL;
+
+ rom_header_offset =
+ GET_IMAGE(uint16_t, OFFSET_TO_ATOM_ROM_HEADER_POINTER);
+
+ if (!rom_header_offset)
+ return false;
+
+ rom_header = GET_IMAGE(struct atom_rom_header_v2_2, *rom_header_offset);
+
+ if (!rom_header)
+ return false;
+
+ get_atom_data_table_revision(&rom_header->table_header, &tbl_rev);
+ if (!(tbl_rev.major >= 2 && tbl_rev.minor >= 2))
+ return false;
+
+ bp->master_data_tbl =
+ GET_IMAGE(struct atom_master_data_table_v2_1,
+ rom_header->masterdatatable_offset);
+
+ if (!bp->master_data_tbl)
+ return false;
+
+ bp->object_info_tbl_offset = DATA_TABLES(displayobjectinfo);
+
+ if (!bp->object_info_tbl_offset)
+ return false;
+
+ object_info_tbl =
+ GET_IMAGE(struct display_object_info_table_v1_4,
+ bp->object_info_tbl_offset);
+
+ if (!object_info_tbl)
+ return false;
+
+ get_atom_data_table_revision(&object_info_tbl->table_header,
+ &bp->object_info_tbl.revision);
+
+ if (bp->object_info_tbl.revision.major == 1
+ && bp->object_info_tbl.revision.minor >= 4) {
+ struct display_object_info_table_v1_4 *tbl_v1_4;
+
+ tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4,
+ bp->object_info_tbl_offset);
+ if (!tbl_v1_4)
+ return false;
+
+ bp->object_info_tbl.v1_4 = tbl_v1_4;
+ } else
+ return false;
+
+ dal_firmware_parser_init_cmd_tbl(bp);
+ dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
+
+ bp->base.integrated_info = bios_parser_create_integrated_info(&bp->base);
+
+ return true;
+}
+
+struct dc_bios *firmware_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ struct bios_parser *bp = NULL;
+
+ bp = kzalloc(sizeof(struct bios_parser), GFP_KERNEL);
+ if (!bp)
+ return NULL;
+
+ if (bios_parser_construct(bp, init, dce_version))
+ return &bp->base;
+
+ kfree(bp);
+ return NULL;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h
new file mode 100644
index 000000000000..cb40546cdafe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER2_H__
+#define __DAL_BIOS_PARSER2_H__
+
+struct dc_bios *firmware_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
new file mode 100644
index 000000000000..a8cb039d2572
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "bios_parser_common.h"
+#include "include/grph_object_ctrl_defs.h"
+
+static enum object_type object_type_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_object_type = (bios_object_id & OBJECT_TYPE_MASK)
+ >> OBJECT_TYPE_SHIFT;
+ enum object_type object_type;
+
+ switch (bios_object_type) {
+ case GRAPH_OBJECT_TYPE_GPU:
+ object_type = OBJECT_TYPE_GPU;
+ break;
+ case GRAPH_OBJECT_TYPE_ENCODER:
+ object_type = OBJECT_TYPE_ENCODER;
+ break;
+ case GRAPH_OBJECT_TYPE_CONNECTOR:
+ object_type = OBJECT_TYPE_CONNECTOR;
+ break;
+ case GRAPH_OBJECT_TYPE_ROUTER:
+ object_type = OBJECT_TYPE_ROUTER;
+ break;
+ case GRAPH_OBJECT_TYPE_GENERIC:
+ object_type = OBJECT_TYPE_GENERIC;
+ break;
+ default:
+ object_type = OBJECT_TYPE_UNKNOWN;
+ break;
+ }
+
+ return object_type;
+}
+
+static enum object_enum_id enum_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_enum_id =
+ (bios_object_id & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+ enum object_enum_id id;
+
+ switch (bios_enum_id) {
+ case GRAPH_OBJECT_ENUM_ID1:
+ id = ENUM_ID_1;
+ break;
+ case GRAPH_OBJECT_ENUM_ID2:
+ id = ENUM_ID_2;
+ break;
+ case GRAPH_OBJECT_ENUM_ID3:
+ id = ENUM_ID_3;
+ break;
+ case GRAPH_OBJECT_ENUM_ID4:
+ id = ENUM_ID_4;
+ break;
+ case GRAPH_OBJECT_ENUM_ID5:
+ id = ENUM_ID_5;
+ break;
+ case GRAPH_OBJECT_ENUM_ID6:
+ id = ENUM_ID_6;
+ break;
+ case GRAPH_OBJECT_ENUM_ID7:
+ id = ENUM_ID_7;
+ break;
+ default:
+ id = ENUM_ID_UNKNOWN;
+ break;
+ }
+
+ return id;
+}
+
+static uint32_t gpu_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ return (bios_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+}
+
+static enum encoder_id encoder_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_encoder_id = gpu_id_from_bios_object_id(bios_object_id);
+ enum encoder_id id;
+
+ switch (bios_encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+ id = ENCODER_ID_INTERNAL_LVDS;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+ id = ENCODER_ID_INTERNAL_TMDS1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_TMDS2:
+ id = ENCODER_ID_INTERNAL_TMDS2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+ id = ENCODER_ID_INTERNAL_DAC1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+ id = ENCODER_ID_INTERNAL_DAC2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ id = ENCODER_ID_INTERNAL_LVTM1;
+ break;
+ case ENCODER_OBJECT_ID_HDMI_INTERNAL:
+ id = ENCODER_ID_INTERNAL_HDMI;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ id = ENCODER_ID_INTERNAL_KLDSCP_TMDS1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+ id = ENCODER_ID_INTERNAL_KLDSCP_DAC1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+ id = ENCODER_ID_INTERNAL_KLDSCP_DAC2;
+ break;
+ case ENCODER_OBJECT_ID_MVPU_FPGA:
+ id = ENCODER_ID_EXTERNAL_MVPU_FPGA;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_DDI:
+ id = ENCODER_ID_INTERNAL_DDI;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ id = ENCODER_ID_INTERNAL_UNIPHY;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ id = ENCODER_ID_INTERNAL_KLDSCP_LVTMA;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ id = ENCODER_ID_INTERNAL_UNIPHY1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ id = ENCODER_ID_INTERNAL_UNIPHY2;
+ break;
+ case ENCODER_OBJECT_ID_ALMOND: /* ENCODER_OBJECT_ID_NUTMEG */
+ id = ENCODER_ID_EXTERNAL_NUTMEG;
+ break;
+ case ENCODER_OBJECT_ID_TRAVIS:
+ id = ENCODER_ID_EXTERNAL_TRAVIS;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ id = ENCODER_ID_INTERNAL_UNIPHY3;
+ break;
+ default:
+ id = ENCODER_ID_UNKNOWN;
+ ASSERT(0);
+ break;
+ }
+
+ return id;
+}
+
+static enum connector_id connector_id_from_bios_object_id(
+ uint32_t bios_object_id)
+{
+ uint32_t bios_connector_id = gpu_id_from_bios_object_id(bios_object_id);
+
+ enum connector_id id;
+
+ switch (bios_connector_id) {
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ id = CONNECTOR_ID_SINGLE_LINK_DVII;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ id = CONNECTOR_ID_DUAL_LINK_DVII;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ id = CONNECTOR_ID_SINGLE_LINK_DVID;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ id = CONNECTOR_ID_DUAL_LINK_DVID;
+ break;
+ case CONNECTOR_OBJECT_ID_VGA:
+ id = CONNECTOR_ID_VGA;
+ break;
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ id = CONNECTOR_ID_HDMI_TYPE_A;
+ break;
+ case CONNECTOR_OBJECT_ID_LVDS:
+ id = CONNECTOR_ID_LVDS;
+ break;
+ case CONNECTOR_OBJECT_ID_PCIE_CONNECTOR:
+ id = CONNECTOR_ID_PCIE;
+ break;
+ case CONNECTOR_OBJECT_ID_HARDCODE_DVI:
+ id = CONNECTOR_ID_HARDCODE_DVI;
+ break;
+ case CONNECTOR_OBJECT_ID_DISPLAYPORT:
+ id = CONNECTOR_ID_DISPLAY_PORT;
+ break;
+ case CONNECTOR_OBJECT_ID_eDP:
+ id = CONNECTOR_ID_EDP;
+ break;
+ case CONNECTOR_OBJECT_ID_MXM:
+ id = CONNECTOR_ID_MXM;
+ break;
+ default:
+ id = CONNECTOR_ID_UNKNOWN;
+ break;
+ }
+
+ return id;
+}
+
+static enum generic_id generic_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ uint32_t bios_generic_id = gpu_id_from_bios_object_id(bios_object_id);
+
+ enum generic_id id;
+
+ switch (bios_generic_id) {
+ case GENERIC_OBJECT_ID_MXM_OPM:
+ id = GENERIC_ID_MXM_OPM;
+ break;
+ case GENERIC_OBJECT_ID_GLSYNC:
+ id = GENERIC_ID_GLSYNC;
+ break;
+ case GENERIC_OBJECT_ID_STEREO_PIN:
+ id = GENERIC_ID_STEREO;
+ break;
+ default:
+ id = GENERIC_ID_UNKNOWN;
+ break;
+ }
+
+ return id;
+}
+
+static uint32_t id_from_bios_object_id(enum object_type type,
+ uint32_t bios_object_id)
+{
+ switch (type) {
+ case OBJECT_TYPE_GPU:
+ return gpu_id_from_bios_object_id(bios_object_id);
+ case OBJECT_TYPE_ENCODER:
+ return (uint32_t)encoder_id_from_bios_object_id(bios_object_id);
+ case OBJECT_TYPE_CONNECTOR:
+ return (uint32_t)connector_id_from_bios_object_id(
+ bios_object_id);
+ case OBJECT_TYPE_GENERIC:
+ return generic_id_from_bios_object_id(bios_object_id);
+ default:
+ return 0;
+ }
+}
+
+struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id)
+{
+ enum object_type type;
+ enum object_enum_id enum_id;
+ struct graphics_object_id go_id = { 0 };
+
+ type = object_type_from_bios_object_id(bios_object_id);
+
+ if (OBJECT_TYPE_UNKNOWN == type)
+ return go_id;
+
+ enum_id = enum_id_from_bios_object_id(bios_object_id);
+
+ if (ENUM_ID_UNKNOWN == enum_id)
+ return go_id;
+
+ go_id = dal_graphics_object_id_init(
+ id_from_bios_object_id(type, bios_object_id), enum_id, type);
+
+ return go_id;
+}
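+
+/*
+ * Illustrative sketch (field values hypothetical): for a BIOS object id whose
+ * type field decodes to GRAPH_OBJECT_TYPE_CONNECTOR, whose enum field decodes
+ * to GRAPH_OBJECT_ENUM_ID1 and whose object id field decodes to
+ * CONNECTOR_OBJECT_ID_HDMI_TYPE_A, the call
+ *
+ *	struct graphics_object_id id =
+ *		object_id_from_bios_object_id(bios_object_id);
+ *
+ * is expected to yield id.type == OBJECT_TYPE_CONNECTOR,
+ * id.enum_id == ENUM_ID_1 and id.id == CONNECTOR_ID_HDMI_TYPE_A.
+ */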
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h
new file mode 100644
index 000000000000..a076c61dfae4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __BIOS_PARSER_COMMON_H__
+#define __BIOS_PARSER_COMMON_H__
+
+#include "dm_services.h"
+#include "ObjectID.h"
+
+struct graphics_object_id object_id_from_bios_object_id(uint32_t bios_object_id);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
new file mode 100644
index 000000000000..5c9e5108c32c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+#include "bios_parser_helper.h"
+#include "command_table_helper.h"
+#include "command_table.h"
+#include "bios_parser_types_internal.h"
+
+uint8_t *bios_get_image(struct dc_bios *bp,
+ uint32_t offset,
+ uint32_t size)
+{
+ if (bp->bios && offset + size < bp->bios_size)
+ return bp->bios + offset;
+ else
+ return NULL;
+}
+
+#include "reg_helper.h"
+
+#define CTX \
+ bios->ctx
+#define REG(reg)\
+ (bios->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ATOM_ ## field_name ## _SHIFT, ATOM_ ## field_name
+
+bool bios_is_accelerated_mode(
+ struct dc_bios *bios)
+{
+ uint32_t acc_mode;
+ REG_GET(BIOS_SCRATCH_6, S6_ACC_MODE, &acc_mode);
+ return (acc_mode == 1);
+}
+
+
+void bios_set_scratch_acc_mode_change(
+ struct dc_bios *bios)
+{
+ REG_UPDATE(BIOS_SCRATCH_6, S6_ACC_MODE, 1);
+}
+
+
+void bios_set_scratch_critical_state(
+ struct dc_bios *bios,
+ bool state)
+{
+ uint32_t critical_state = state ? 1 : 0;
+ REG_UPDATE(BIOS_SCRATCH_6, S6_CRITICAL_STATE, critical_state);
+}
+
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
new file mode 100644
index 000000000000..c0047efeb006
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_HELPER_H__
+#define __DAL_BIOS_PARSER_HELPER_H__
+
+struct bios_parser;
+
+uint8_t *bios_get_image(struct dc_bios *bp, uint32_t offset,
+ uint32_t size);
+
+bool bios_is_accelerated_mode(struct dc_bios *bios);
+void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
+void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
+
+#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
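+
+/*
+ * Typical usage sketch: the parser fetches typed, bounds-checked pointers
+ * into the BIOS image with GET_IMAGE(), for example:
+ *
+ *	struct atom_common_table_header *header =
+ *		GET_IMAGE(struct atom_common_table_header, offset);
+ *	if (!header)
+ *		return BP_RESULT_BADBIOSTABLE;
+ */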
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
new file mode 100644
index 000000000000..0079a1e26efd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+
+#include "bios_parser_interface.h"
+#include "bios_parser.h"
+
+#include "bios_parser2.h"
+
+
+struct dc_bios *dal_bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version)
+{
+ struct dc_bios *bios = NULL;
+
+ bios = firmware_parser_create(init, dce_version);
+
+ /* Fall back to the old bios parser for older ASICs */
+ if (bios == NULL)
+ bios = bios_parser_create(init, dce_version);
+
+ return bios;
+}
+
+void dal_bios_parser_destroy(struct dc_bios **dcb)
+{
+ struct dc_bios *bios = *dcb;
+
+ bios->funcs->bios_parser_destroy(dcb);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
new file mode 100644
index 000000000000..5918923bfb93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_BIOS_H__
+#define __DAL_BIOS_PARSER_TYPES_BIOS_H__
+
+#include "dc_bios_types.h"
+#include "bios_parser_helper.h"
+
+struct atom_data_revision {
+ uint32_t major;
+ uint32_t minor;
+};
+
+struct object_info_table {
+ struct atom_data_revision revision;
+ union {
+ ATOM_OBJECT_HEADER *v1_1;
+ ATOM_OBJECT_HEADER_V3 *v1_3;
+ };
+};
+
+enum spread_spectrum_id {
+ SS_ID_UNKNOWN = 0,
+ SS_ID_DP1 = 0xf1,
+ SS_ID_DP2 = 0xf2,
+ SS_ID_LVLINK_2700MHZ = 0xf3,
+ SS_ID_LVLINK_1620MHZ = 0xf4
+};
+
+struct bios_parser {
+ struct dc_bios base;
+
+ struct object_info_table object_info_tbl;
+ uint32_t object_info_tbl_offset;
+ ATOM_MASTER_DATA_TABLE *master_data_tbl;
+
+ const struct bios_parser_helper *bios_helper;
+
+ const struct command_table_helper *cmd_helper;
+ struct cmd_tbl cmd_tbl;
+
+ bool remap_device_tags;
+};
+
+/* Bios Parser from DC Bios */
+#define BP_FROM_DCB(dc_bios) \
+ container_of(dc_bios, struct bios_parser, base)
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h
new file mode 100644
index 000000000000..bf1f5c86e65c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_BIOS2_H__
+#define __DAL_BIOS_PARSER_TYPES_BIOS2_H__
+
+#include "dc_bios_types.h"
+#include "bios_parser_helper.h"
+
+/* use atomfirmware_bringup.h only. Not atombios.h anymore */
+
+struct atom_data_revision {
+ uint32_t major;
+ uint32_t minor;
+};
+
+struct object_info_table {
+ struct atom_data_revision revision;
+ union {
+ struct display_object_info_table_v1_4 *v1_4;
+ };
+};
+
+enum spread_spectrum_id {
+ SS_ID_UNKNOWN = 0,
+ SS_ID_DP1 = 0xf1,
+ SS_ID_DP2 = 0xf2,
+ SS_ID_LVLINK_2700MHZ = 0xf3,
+ SS_ID_LVLINK_1620MHZ = 0xf4
+};
+
+struct bios_parser {
+ struct dc_bios base;
+
+ struct object_info_table object_info_tbl;
+ uint32_t object_info_tbl_offset;
+ struct atom_master_data_table_v2_1 *master_data_tbl;
+
+ const struct bios_parser_helper *bios_helper;
+
+ const struct command_table_helper *cmd_helper;
+ struct cmd_tbl cmd_tbl;
+
+ bool remap_device_tags;
+};
+
+/* Bios Parser from DC Bios */
+#define BP_FROM_DCB(dc_bios) \
+ container_of(dc_bios, struct bios_parser, base)
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
new file mode 100644
index 000000000000..3f7b2dabc2b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -0,0 +1,2424 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_interface.h"
+
+#include "command_table.h"
+#include "command_table_helper.h"
+#include "bios_parser_helper.h"
+#include "bios_parser_types_internal.h"
+
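+/* Convenience wrappers around the CGS ATOM services: EXEC_BIOS_CMD_TABLE
+ * executes a command table from the master COMMAND table and evaluates to
+ * true on success; the *_REVISION macros query that table's format and
+ * content revisions.
+ */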
+#define EXEC_BIOS_CMD_TABLE(command, params)\
+ (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+ GetIndexIntoMasterTable(COMMAND, command), \
+ &params) == 0)
+
+#define BIOS_CMD_TABLE_REVISION(command, frev, crev)\
+ cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+ GetIndexIntoMasterTable(COMMAND, command), &frev, &crev)
+
+#define BIOS_CMD_TABLE_PARA_REVISION(command)\
+ bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+ GetIndexIntoMasterTable(COMMAND, command))
+
+static void init_dig_encoder_control(struct bios_parser *bp);
+static void init_transmitter_control(struct bios_parser *bp);
+static void init_set_pixel_clock(struct bios_parser *bp);
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
+static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_output_control(struct bios_parser *bp);
+static void init_set_crtc_timing(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
+static void init_enable_crtc(struct bios_parser *bp);
+static void init_enable_crtc_mem_req(struct bios_parser *bp);
+static void init_external_encoder_control(struct bios_parser *bp);
+static void init_enable_disp_power_gating(struct bios_parser *bp);
+static void init_program_clock(struct bios_parser *bp);
+static void init_set_dce_clock(struct bios_parser *bp);
+
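+/* Populate bp->cmd_tbl with the implementation matching each command
+ * table's parameter revision as reported by the VBIOS.
+ */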
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+ init_dig_encoder_control(bp);
+ init_transmitter_control(bp);
+ init_set_pixel_clock(bp);
+ init_enable_spread_spectrum_on_ppll(bp);
+ init_adjust_display_pll(bp);
+ init_dac_encoder_control(bp);
+ init_dac_output_control(bp);
+ init_set_crtc_timing(bp);
+ init_select_crtc_source(bp);
+ init_enable_crtc(bp);
+ init_enable_crtc_mem_req(bp);
+ init_program_clock(bp);
+ init_external_encoder_control(bp);
+ init_enable_disp_power_gating(bp);
+ init_set_dce_clock(bp);
+}
+
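+/* Return the content revision (crev) of the given command table entry,
+ * or 0 if the revision query fails.
+ */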
+static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+ uint32_t index)
+{
+ uint8_t frev, crev;
+
+ if (cgs_atom_get_cmd_table_revs(cgs_device,
+ index,
+ &frev, &crev) != 0)
+ return 0;
+ return crev;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** D I G E N C O D E R C O N T R O L
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result encoder_control_digx_v3(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static enum bp_result encoder_control_digx_v4(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static enum bp_result encoder_control_digx_v5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static void init_encoder_control_dig_v1(struct bios_parser *bp);
+
+static void init_dig_encoder_control(struct bios_parser *bp)
+{
+ uint32_t version =
+ BIOS_CMD_TABLE_PARA_REVISION(DIGxEncoderControl);
+
+ switch (version) {
+ case 2:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v3;
+ break;
+ case 4:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v4;
+ break;
+
+ case 5:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v5;
+ break;
+
+ default:
+ init_encoder_control_dig_v1(bp);
+ break;
+ }
+}
+
+static enum bp_result encoder_control_dig_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig1_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+static enum bp_result encoder_control_dig2_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static void init_encoder_control_dig_v1(struct bios_parser *bp)
+{
+ struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
+
+ if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG1EncoderControl))
+ cmd_tbl->encoder_control_dig1 = encoder_control_dig1_v1;
+ else
+ cmd_tbl->encoder_control_dig1 = NULL;
+
+ if (1 == BIOS_CMD_TABLE_PARA_REVISION(DIG2EncoderControl))
+ cmd_tbl->encoder_control_dig2 = encoder_control_dig2_v1;
+ else
+ cmd_tbl->encoder_control_dig2 = NULL;
+
+ cmd_tbl->dig_encoder_control = encoder_control_dig_v1;
+}
+
+static enum bp_result encoder_control_dig_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct cmd_tbl *cmd_tbl = &bp->cmd_tbl;
+
+ if (cntl != NULL)
+ switch (cntl->engine_id) {
+ case ENGINE_ID_DIGA:
+ if (cmd_tbl->encoder_control_dig1 != NULL)
+ result =
+ cmd_tbl->encoder_control_dig1(bp, cntl);
+ break;
+ case ENGINE_ID_DIGB:
+ if (cmd_tbl->encoder_control_dig2 != NULL)
+ result =
+ cmd_tbl->encoder_control_dig2(bp, cntl);
+ break;
+
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static enum bp_result encoder_control_dig1_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
+
+ bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+ if (EXEC_BIOS_CMD_TABLE(DIG1EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_dig2_v1(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 params = {0};
+
+ bp->cmd_helper->assign_control_parameter(bp->cmd_helper, cntl, &params);
+
+ if (EXEC_BIOS_CMD_TABLE(DIG2EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_digx_v3(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V3 params = {0};
+
+ if (LANE_COUNT_FOUR < cntl->lanes_number)
+ params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
+ else
+ params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
+
+ params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+ params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ params.ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio);
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_digx_v4(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_ENCODER_CONTROL_PARAMETERS_V4 params = {0};
+
+ if (LANE_COUNT_FOUR < cntl->lanes_number)
+ params.acConfig.ucDPLinkRate = 1; /* dual link 2.7GHz */
+ else
+ params.acConfig.ucDPLinkRate = 0; /* single link 1.62GHz */
+
+ params.acConfig.ucDigSel = (uint8_t)(cntl->engine_id);
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+ params.usPixelClock = cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ params.ucEncoderMode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result encoder_control_digx_v5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENCODER_STREAM_SETUP_PARAMETERS_V5 params = {0};
+
+ params.ucDigId = (uint8_t)(cntl->engine_id);
+ params.ucAction = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+
+ params.ulPixelClock = cntl->pixel_clock / 10;
+ params.ucDigMode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
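+ /* For HDMI deep color the stream clock is scaled by the ratio of the
+ * panel bit depth to 24 bpp (30/24, 36/24 or 48/24).
+ */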
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.ulPixelClock =
+ (params.ulPixelClock * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ params.ulPixelClock =
+ (params.ulPixelClock * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ params.ulPixelClock =
+ (params.ulPixelClock * 48) / 24;
+ break;
+ default:
+ break;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** TRANSMITTER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result transmitter_control_v2(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v3(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v4(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_5(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+
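+/* The UNIPHYTransmitterControl content revision selects which parameter
+ * layout (v2 through v1.6) the VBIOS expects.
+ */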
+static void init_transmitter_control(struct bios_parser *bp)
+{
+ uint8_t frev;
+ uint8_t crev;
+
+ if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl,
+ frev, crev) != 0)
+ BREAK_TO_DEBUGGER();
+ switch (crev) {
+ case 2:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v3;
+ break;
+ case 4:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v4;
+ break;
+ case 5:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v1_5;
+ break;
+ case 6:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
+ break;
+ default:
+ bp->cmd_tbl.transmitter_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result transmitter_control_v2(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 params;
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+
+ memset(&params, 0, sizeof(params));
+
+ switch (cntl->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ case TRANSMITTER_TRAVIS_LCD:
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ switch (cntl->action) {
+ case TRANSMITTER_CONTROL_INIT:
+ if ((CONNECTOR_ID_DUAL_LINK_DVII == connector_id) ||
+ (CONNECTOR_ID_DUAL_LINK_DVID == connector_id))
+ /* on INIT this bit should be set according to the
+ * physical connector
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* connector object id */
+ params.usInitInfo =
+ cpu_to_le16((uint8_t)cntl->connector_obj_id.id);
+ break;
+ case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+ /* voltage swing and pre-emphasis */
+ params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+ params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+ break;
+ default:
+ /* if dual-link */
+ if (LANE_COUNT_FOUR < cntl->lanes_number) {
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 20KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+ } else
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ break;
+ }
+
+ /* 00 - coherent mode
+ * 01 - incoherent mode
+ */
+
+ params.acConfig.fCoherentMode = cntl->coherent;
+
+ if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+ /* Bit2: Transmitter Link selection
+ * =0 when bit0=0, single link A/C/E, when bit0=1,
+ * master link A/C/E
+ * =1 when bit0=0, single link B/D/F, when bit0=1,
+ * master link B/D/F
+ */
+ params.acConfig.ucLinkSel = 1;
+
+ if (ENGINE_ID_DIGB == cntl->engine_id)
+ /* Bit3: Transmitter data source selection
+ * =0 DIGA is data source.
+ * =1 DIGB is data source.
+ * This bit is only useful when ucAction= ATOM_ENABLE
+ */
+ params.acConfig.ucEncoderSel = 1;
+
+ if (CONNECTOR_ID_DISPLAY_PORT == connector_id)
+ /* Bit4: DP connector flag
+ * =0 connector is a non-DP connector
+ * =1 connector is DP connector
+ */
+ params.acConfig.fDPConnector = 1;
+
+ /* Bit[7:6]: Transmitter selection
+ * =0 UNIPHY_ENCODER: UNIPHYA/B
+ * =1 UNIPHY1_ENCODER: UNIPHYC/D
+ * =2 UNIPHY2_ENCODER: UNIPHYE/F
+ * =3 reserved
+ */
+ params.acConfig.ucTransmitterSel =
+ (uint8_t)bp->cmd_helper->transmitter_bp_to_atom(
+ cntl->transmitter);
+
+ params.ucAction = (uint8_t)cntl->action;
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v3(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 params;
+ uint32_t pll_id;
+ enum connector_id conn_id =
+ dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ bool dual_link_conn = (CONNECTOR_ID_DUAL_LINK_DVII == conn_id)
+ || (CONNECTOR_ID_DUAL_LINK_DVID == conn_id);
+
+ memset(&params, 0, sizeof(params));
+
+ switch (cntl->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ case TRANSMITTER_TRAVIS_LCD:
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (!cmd->clock_source_id_to_atom(cntl->pll_id, &pll_id))
+ return BP_RESULT_BADINPUT;
+
+ /* fill information based on the action */
+ switch (cntl->action) {
+ case TRANSMITTER_CONTROL_INIT:
+ if (dual_link_conn) {
+ /* on INIT this bit should be set according to the
+ * physical connector
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+ }
+
+ /* connector object id */
+ params.usInitInfo =
+ cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
+ break;
+ case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+ /* voltage swing and pre-emphasis */
+ params.asMode.ucLaneSel = (uint8_t)cntl->lane_select;
+ params.asMode.ucLaneSet = (uint8_t)cntl->lane_settings;
+ break;
+ default:
+ if (dual_link_conn && cntl->multi_path)
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* if dual-link */
+ if (LANE_COUNT_FOUR < cntl->lanes_number) {
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 20KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+ } else {
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ }
+ break;
+ }
+
+ /* 00 - coherent mode
+ * 01 - incoherent mode
+ */
+
+ params.acConfig.fCoherentMode = cntl->coherent;
+
+ if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+ /* Bit2: Transmitter Link selection
+ * =0 when bit0=0, single link A/C/E, when bit0=1,
+ * master link A/C/E
+ * =1 when bit0=0, single link B/D/F, when bit0=1,
+ * master link B/D/F
+ */
+ params.acConfig.ucLinkSel = 1;
+
+ if (ENGINE_ID_DIGB == cntl->engine_id)
+ /* Bit3: Transmitter data source selection
+ * =0 DIGA is data source.
+ * =1 DIGB is data source.
+ * This bit is only useful when ucAction= ATOM_ENABLE
+ */
+ params.acConfig.ucEncoderSel = 1;
+
+ /* Bit[7:6]: Transmitter selection
+ * =0 UNIPHY_ENCODER: UNIPHYA/B
+ * =1 UNIPHY1_ENCODER: UNIPHYC/D
+ * =2 UNIPHY2_ENCODER: UNIPHYE/F
+ * =3 reserved
+ */
+ params.acConfig.ucTransmitterSel =
+ (uint8_t)cmd->transmitter_bp_to_atom(cntl->transmitter);
+
+ params.ucLaneNum = (uint8_t)cntl->lanes_number;
+
+ params.acConfig.ucRefClkSource = (uint8_t)pll_id;
+
+ params.ucAction = (uint8_t)cntl->action;
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v4(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 params;
+ uint32_t ref_clk_src_id;
+ enum connector_id conn_id =
+ dal_graphics_object_id_get_connector_id(cntl->connector_obj_id);
+ const struct command_table_helper *cmd = bp->cmd_helper;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (cntl->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ case TRANSMITTER_TRAVIS_LCD:
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (!cmd->clock_source_id_to_ref_clk_src(cntl->pll_id, &ref_clk_src_id))
+ return BP_RESULT_BADINPUT;
+
+ switch (cntl->action) {
+ case TRANSMITTER_CONTROL_INIT:
+ {
+ if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+ (CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+ /* on INIT this bit should be set according to the
+ * physical connector
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* connector object id */
+ params.usInitInfo =
+ cpu_to_le16((uint8_t)(cntl->connector_obj_id.id));
+ }
+ break;
+ case TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS:
+ /* voltage swing and pre-emphasis */
+ params.asMode.ucLaneSel = (uint8_t)(cntl->lane_select);
+ params.asMode.ucLaneSet = (uint8_t)(cntl->lane_settings);
+ break;
+ default:
+ if ((CONNECTOR_ID_DUAL_LINK_DVII == conn_id) ||
+ (CONNECTOR_ID_DUAL_LINK_DVID == conn_id))
+ /* on ENABLE/DISABLE this bit should be set according to
+ * actual timing (number of lanes)
+ * Bit0: dual link connector flag
+ * =0 connector is single link connector
+ * =1 connector is dual link connector
+ */
+ params.acConfig.fDualLinkConnector = 1;
+
+ /* if dual-link */
+ if (LANE_COUNT_FOUR < cntl->lanes_number)
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 20KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 20));
+ else {
+ /* link rate, half for dual link
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ }
+ break;
+ }
+
+ /* 00 - coherent mode
+ * 01 - incoherent mode
+ */
+
+ params.acConfig.fCoherentMode = cntl->coherent;
+
+ if ((TRANSMITTER_UNIPHY_B == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_D == cntl->transmitter)
+ || (TRANSMITTER_UNIPHY_F == cntl->transmitter))
+ /* Bit2: Transmitter Link selection
+ * =0 when bit0=0, single link A/C/E, when bit0=1,
+ * master link A/C/E
+ * =1 when bit0=0, single link B/D/F, when bit0=1,
+ * master link B/D/F
+ */
+ params.acConfig.ucLinkSel = 1;
+
+ if (ENGINE_ID_DIGB == cntl->engine_id)
+ /* Bit3: Transmitter data source selection
+ * =0 DIGA is data source.
+ * =1 DIGB is data source.
+ * This bit is only useful when ucAction= ATOM_ENABLE
+ */
+ params.acConfig.ucEncoderSel = 1;
+
+ /* Bit[7:6]: Transmitter selection
+ * =0 UNIPHY_ENCODER: UNIPHYA/B
+ * =1 UNIPHY1_ENCODER: UNIPHYC/D
+ * =2 UNIPHY2_ENCODER: UNIPHYE/F
+ * =3 reserved
+ */
+ params.acConfig.ucTransmitterSel =
+ (uint8_t)(cmd->transmitter_bp_to_atom(cntl->transmitter));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+ params.acConfig.ucRefClkSource = (uint8_t)(ref_clk_src_id);
+ params.ucAction = (uint8_t)(cntl->action);
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v1_5(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 params;
+
+ memset(&params, 0, sizeof(params));
+ params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+ params.ucAction = (uint8_t)cntl->action;
+ params.ucLaneNum = (uint8_t)cntl->lanes_number;
+ params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+
+ params.ucDigMode =
+ cmd->signal_type_to_atom_dig_mode(cntl->signal);
+ params.asConfig.ucPhyClkSrcId =
+ cmd->clock_source_id_to_atom_phy_clk_src_id(cntl->pll_id);
+ /* 00 - coherent mode */
+ params.asConfig.ucCoherentMode = cntl->coherent;
+ params.asConfig.ucHPDSel =
+ cmd->hpd_sel_to_atom(cntl->hpd_sel);
+ params.ucDigEncoderSel =
+ cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+ params.ucDPLaneSet = (uint8_t) cntl->lane_settings;
+ params.usSymClock = cpu_to_le16((uint16_t) (cntl->pixel_clock / 10));
+ /*
+ * In the SI/TN case, the caller has to set usPixelClock as follows:
+ * DP mode: usPixelClock = DP_LINK_CLOCK/10
+ * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+ * DVI single link mode: usPixelClock = pixel clock
+ * DVI dual link mode: usPixelClock = pixel clock
+ * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+ * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+ * LVDS mode: usPixelClock = pixel clock
+ */
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_6 params;
+
+ memset(&params, 0, sizeof(params));
+ params.ucPhyId = cmd->phy_id_to_atom(cntl->transmitter);
+ params.ucAction = (uint8_t)cntl->action;
+
+ if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
+ params.ucDPLaneSet = (uint8_t)cntl->lane_settings;
+ else
+ params.ucDigMode = cmd->signal_type_to_atom_dig_mode(cntl->signal);
+
+ params.ucLaneNum = (uint8_t)cntl->lanes_number;
+ params.ucHPDSel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
+ params.ucDigEncoderSel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+ params.ucConnObjId = (uint8_t)cntl->connector_obj_id.id;
+ params.ulSymClock = cpu_to_le32(cntl->pixel_clock / 10);
+
+ /*
+ * In the SI/TN case, the caller has to set usPixelClock as follows:
+ * DP mode: usPixelClock = DP_LINK_CLOCK/10
+ * (DP_LINK_CLOCK = 1.62GHz, 2.7GHz, 5.4GHz)
+ * DVI single link mode: usPixelClock = pixel clock
+ * DVI dual link mode: usPixelClock = pixel clock
+ * HDMI mode: usPixelClock = pixel clock * deep_color_ratio
+ * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
+ * LVDS mode: usPixelClock = pixel clock
+ */
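+ /* Scale the symbol clock for HDMI deep color modes. */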
+ switch (cntl->signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.ulSymClock =
+ cpu_to_le32((le32_to_cpu(params.ulSymClock) * 30) / 24);
+ break;
+ case COLOR_DEPTH_121212:
+ params.ulSymClock =
+ cpu_to_le32((le32_to_cpu(params.ulSymClock) * 36) / 24);
+ break;
+ case COLOR_DEPTH_161616:
+ params.ulSymClock =
+ cpu_to_le32((le32_to_cpu(params.ulSymClock) * 48) / 24);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
+ result = BP_RESULT_OK;
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SET PIXEL CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_pixel_clock_v3(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
+static void init_set_pixel_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+ case 3:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v3;
+ break;
+ case 5:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v5;
+ break;
+ case 6:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v6;
+ break;
+ case 7:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+ break;
+ default:
+ bp->cmd_tbl.set_pixel_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_pixel_clock_v3(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ PIXEL_CLOCK_PARAMETERS_V3 *params;
+ SET_PIXEL_CLOCK_PS_ALLOCATION allocation;
+
+ memset(&allocation, 0, sizeof(allocation));
+
+ if (CLOCK_SOURCE_ID_PLL1 == bp_params->pll_id)
+ allocation.sPCLKInput.ucPpll = ATOM_PPLL1;
+ else if (CLOCK_SOURCE_ID_PLL2 == bp_params->pll_id)
+ allocation.sPCLKInput.ucPpll = ATOM_PPLL2;
+ else
+ return BP_RESULT_BADINPUT;
+
+ allocation.sPCLKInput.usRefDiv =
+ cpu_to_le16((uint16_t)bp_params->reference_divider);
+ allocation.sPCLKInput.usFbDiv =
+ cpu_to_le16((uint16_t)bp_params->feedback_divider);
+ allocation.sPCLKInput.ucFracFbDiv =
+ (uint8_t)bp_params->fractional_feedback_divider;
+ allocation.sPCLKInput.ucPostDiv =
+ (uint8_t)bp_params->pixel_clock_post_divider;
+
+ /* We need to convert from KHz units into 10KHz units */
+ allocation.sPCLKInput.usPixelClock =
+ cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+ params = (PIXEL_CLOCK_PARAMETERS_V3 *)&allocation.sPCLKInput;
+ params->ucTransmitterId =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ params->ucEncoderMode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false));
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ params->ucMiscInfo |= PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK)
+ params->ucMiscInfo |= PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK;
+
+ if (CONTROLLER_ID_D1 != bp_params->controller_id)
+ params->ucMiscInfo |= PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, allocation))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V5
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V5 {
+ PIXEL_CLOCK_PARAMETERS_V5 sPCLKInput;
+ /* Caller doesn't need to init this portion */
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V5;
+#endif
+
+#ifndef SET_PIXEL_CLOCK_PS_ALLOCATION_V6
+/* video bios did not define this: */
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION_V6 {
+ PIXEL_CLOCK_PARAMETERS_V6 sPCLKInput;
+ /* Caller doesn't need to init this portion */
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;
+} SET_PIXEL_CLOCK_PS_ALLOCATION_V6;
+#endif
+
+static enum bp_result set_pixel_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V5 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &controller_id)) {
+ clk.sPCLKInput.ucCRTC = controller_id;
+ clk.sPCLKInput.ucPpll = (uint8_t)pll_id;
+ clk.sPCLKInput.ucRefDiv =
+ (uint8_t)(bp_params->reference_divider);
+ clk.sPCLKInput.usFbDiv =
+ cpu_to_le16((uint16_t)(bp_params->feedback_divider));
+ clk.sPCLKInput.ulFbDivDecFrac =
+ cpu_to_le32(bp_params->fractional_feedback_divider);
+ clk.sPCLKInput.ucPostDiv =
+ (uint8_t)(bp_params->pixel_clock_post_divider);
+ clk.sPCLKInput.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ clk.sPCLKInput.ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ /* We need to convert from KHz units into 10KHz units */
+ clk.sPCLKInput.usPixelClock =
+ cpu_to_le16((uint16_t)(bp_params->target_pixel_clock / 10));
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+ /* clkV5.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0: 24bpp
+ * =1:30bpp, =2:32bpp
+ * The driver chooses to program it itself, i.e. here we program
+ * it to 888 by default.
+ */
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+static enum bp_result set_pixel_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V6 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &controller_id)) {
+ /* Note: VBIOS still wants to use the ucCRTC name, which is now
+ * one byte of a ULONG:
+ * typedef struct _CRTC_PIXEL_CLOCK_FREQ {
+ *  ULONG ulPixelClock:24; target pixel clock to drive the CRTC
+ *                         timing; 0 means disable PPLL/DCPLL
+ *                         (expanded to 24 bits compared to the
+ *                         previous version)
+ *  ULONG ucCRTC:8;        ATOM_CRTC1~6, the CRTC controller that
+ *                         drives the pixel clock; not used for
+ *                         the DCPLL case
+ * } CRTC_PIXEL_CLOCK_FREQ;
+ * union {
+ *  CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; pixel clock and CRTC id
+ *  ULONG ulDispEngClkFreq;               dispclk frequency
+ * };
+ */
+ clk.sPCLKInput.ulCrtcPclkFreq.ucCRTC = controller_id;
+ clk.sPCLKInput.ucPpll = (uint8_t) pll_id;
+ clk.sPCLKInput.ucRefDiv =
+ (uint8_t) bp_params->reference_divider;
+ clk.sPCLKInput.usFbDiv =
+ cpu_to_le16((uint16_t) bp_params->feedback_divider);
+ clk.sPCLKInput.ulFbDivDecFrac =
+ cpu_to_le32(bp_params->fractional_feedback_divider);
+ clk.sPCLKInput.ucPostDiv =
+ (uint8_t) bp_params->pixel_clock_post_divider;
+ clk.sPCLKInput.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ clk.sPCLKInput.ucEncoderMode =
+ (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ /* We need to convert from KHz units into 10KHz units */
+ clk.sPCLKInput.ulCrtcPclkFreq.ulPixelClock =
+ cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL) {
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL;
+ }
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC) {
+ clk.sPCLKInput.ucMiscInfo |=
+ PIXEL_CLOCK_V6_MISC_REF_DIV_SRC;
+ }
+
+ /* clkV6.ucMiscInfo bit[3:2]= HDMI panel bit depth: =0:
+ * 24bpp =1:30bpp, =2:32bpp
+ * The driver chooses to program it itself, i.e. here we pass the
+ * required target rate that includes deep color.
+ */
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ PIXEL_CLOCK_PARAMETERS_V7 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &controller_id)) {
+ /* Note: VBIOS still wants to use the ucCRTC name, which is now
+ * one byte of a ULONG:
+ * typedef struct _CRTC_PIXEL_CLOCK_FREQ {
+ *  ULONG ulPixelClock:24; target pixel clock to drive the CRTC
+ *                         timing; 0 means disable PPLL/DCPLL
+ *                         (expanded to 24 bits compared to the
+ *                         previous version)
+ *  ULONG ucCRTC:8;        ATOM_CRTC1~6, the CRTC controller that
+ *                         drives the pixel clock; not used for
+ *                         the DCPLL case
+ * } CRTC_PIXEL_CLOCK_FREQ;
+ * union {
+ *  CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; pixel clock and CRTC id
+ *  ULONG ulDispEngClkFreq;               dispclk frequency
+ * };
+ */
+ clk.ucCRTC = controller_id;
+ clk.ucPpll = (uint8_t) pll_id;
+ clk.ucTransmitterID = bp->cmd_helper->encoder_id_to_atom(dal_graphics_object_id_get_encoder_id(bp_params->encoder_object_id));
+ clk.ucEncoderMode = (uint8_t) bp->cmd_helper->encoder_mode_bp_to_atom(bp_params->signal_type, false);
+
+ /* We need to convert from KHz units into 100Hz units */
+ clk.ulPixelClock = cpu_to_le32(bp_params->target_pixel_clock * 10);
+
+ clk.ucDeepColorRatio = (uint8_t) bp->cmd_helper->transmitter_color_depth_to_atom(bp_params->color_depth);
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC;
+
+ if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
+
+ if (bp_params->flags.SUPPORT_YUV_420)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
+
+ if (bp_params->flags.SET_XTALIN_REF_SRC)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
+
+ if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
+
+ if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+ clk.ucMiscInfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+ }
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE PIXEL CLOCK SS
+ **
+ ********************************************************************************
+ *******************************************************************************/
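+/* Spread spectrum slightly modulates the PLL frequency to reduce EMI;
+ * these entries enable or disable it on the selected PPLL.
+ */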
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+
+static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableSpreadSpectrumOnPPLL)) {
+ case 1:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+ enable_spread_spectrum_on_ppll_v1;
+ break;
+ case 2:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+ enable_spread_spectrum_on_ppll_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll =
+ enable_spread_spectrum_on_ppll_v3;
+ break;
+ default:
+ bp->cmd_tbl.enable_spread_spectrum_on_ppll = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v1(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL params;
+
+ memset(&params, 0, sizeof(params));
+
+ if ((enable == true) && (bp_params->percentage > 0))
+ params.ucEnable = ATOM_ENABLE;
+ else
+ params.ucEnable = ATOM_DISABLE;
+
+ params.usSpreadSpectrumPercentage =
+ cpu_to_le16((uint16_t)bp_params->percentage);
+ params.ucSpreadSpectrumStep =
+ (uint8_t)bp_params->ver1.step;
+ params.ucSpreadSpectrumDelay =
+ (uint8_t)bp_params->ver1.delay;
+ /* convert back to unit of 10KHz */
+ params.ucSpreadSpectrumRange =
+ (uint8_t)(bp_params->ver1.range / 10000);
+
+ if (bp_params->flags.EXTERNAL_SS)
+ params.ucSpreadSpectrumType |= ATOM_EXTERNAL_SS_MASK;
+
+ if (bp_params->flags.CENTER_SPREAD)
+ params.ucSpreadSpectrumType |= ATOM_SS_CENTRE_SPREAD_MODE;
+
+ if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+ params.ucPpll = ATOM_PPLL1;
+ else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+ params.ucPpll = ATOM_PPLL2;
+ else
+ BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+
+ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v2(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 params;
+
+ memset(&params, 0, sizeof(params));
+
+ if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL1)
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P1PLL;
+ else if (bp_params->pll_id == CLOCK_SOURCE_ID_PLL2)
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_P2PLL;
+ else
+ BREAK_TO_DEBUGGER(); /* Unexpected PLL value!! */
+
+ if ((enable == true) && (bp_params->percentage > 0)) {
+ params.ucEnable = ATOM_ENABLE;
+
+ params.usSpreadSpectrumPercentage =
+ cpu_to_le16((uint16_t)(bp_params->percentage));
+ params.usSpreadSpectrumStep =
+ cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+ if (bp_params->flags.EXTERNAL_SS)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD;
+
+ if (bp_params->flags.CENTER_SPREAD)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD;
+
+ /* Both amounts need to be left shifted first before bit
+ * comparison. Otherwise, the result will always be zero here
+ */
+ params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+ ((bp_params->ds.feedback_amount <<
+ ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
+ ((bp_params->ds.nfrac_amount <<
+ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK)));
+ } else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result enable_spread_spectrum_on_ppll_v3(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 params;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (bp_params->pll_id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ /* ATOM_PPLL_SS_TYPE_V3_P0PLL; this is pixel clock only,
+ * not for SI display clock.
+ */
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P1PLL;
+ break;
+
+ case CLOCK_SOURCE_ID_PLL2:
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_P2PLL;
+ break;
+
+ case CLOCK_SOURCE_ID_DCPLL:
+ params.ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V3_DCPLL;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER();
+ /* Unexpected PLL value!! */
+ return result;
+ }
+
+ if (enable == true) {
+ params.ucEnable = ATOM_ENABLE;
+
+ params.usSpreadSpectrumAmountFrac =
+ cpu_to_le16((uint16_t)(bp_params->ds_frac_amount));
+ params.usSpreadSpectrumStep =
+ cpu_to_le16((uint16_t)(bp_params->ds.ds_frac_size));
+
+ if (bp_params->flags.EXTERNAL_SS)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD;
+ if (bp_params->flags.CENTER_SPREAD)
+ params.ucSpreadSpectrumType |=
+ ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD;
+
+ /* Both amounts need to be left shifted first before bit
+ * comparison. Otherwise, the result will always be zero here
+ */
+ params.usSpreadSpectrumAmount = cpu_to_le16((uint16_t)(
+ ((bp_params->ds.feedback_amount <<
+ ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK) |
+ ((bp_params->ds.nfrac_amount <<
+ ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT) &
+ ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK)));
+ } else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableSpreadSpectrumOnPPLL, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ADJUST DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result adjust_display_pll_v2(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+static enum bp_result adjust_display_pll_v3(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+
+static void init_adjust_display_pll(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(AdjustDisplayPll)) {
+ case 2:
+ bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.adjust_display_pll = adjust_display_pll_v3;
+ break;
+ default:
+ bp->cmd_tbl.adjust_display_pll = NULL;
+ break;
+ }
+}
+
+static enum bp_result adjust_display_pll_v2(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION params = { 0 };
+
+ /* We need to convert from KHz units into 10KHz units and then convert
+ * output pixel clock back 10KHz-->KHz */
+ uint32_t pixel_clock_10KHz_in = bp_params->pixel_clock / 10;
+
+ params.usPixelClock = cpu_to_le16((uint16_t)(pixel_clock_10KHz_in));
+ params.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ params.ucEncodeMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+ return result;
+}
+
+static enum bp_result adjust_display_pll_v3(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 params;
+ uint32_t pixel_clk_10_kHz_in = bp_params->pixel_clock / 10;
+
+ memset(&params, 0, sizeof(params));
+
+ /* We need to convert from KHz units into 10KHz units and then convert
+ * output pixel clock back 10KHz-->KHz */
+ params.sInput.usPixelClock = cpu_to_le16((uint16_t)pixel_clk_10_kHz_in);
+ params.sInput.ucTransmitterID =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+ params.sInput.ucEncodeMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ if (bp_params->ss_enable == true)
+ params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_SS_ENABLE;
+
+ if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+ params.sInput.ucDispPllConfig |= DISPPLL_CONFIG_DUAL_LINK;
+
+ if (EXEC_BIOS_CMD_TABLE(AdjustDisplayPll, params)) {
+ /* Convert output pixel clock back 10KHz-->KHz: multiply
+ * original pixel clock in KHz by ratio
+ * [output pxlClk/input pxlClk] */
+ uint64_t pixel_clk_10_khz_out =
+ (uint64_t)le32_to_cpu(params.sOutput.ulDispPllFreq);
+ uint64_t pixel_clk = (uint64_t)bp_params->pixel_clock;
+
+ if (pixel_clk_10_kHz_in != 0) {
+ bp_params->adjusted_pixel_clock =
+ div_u64(pixel_clk * pixel_clk_10_khz_out,
+ pixel_clk_10_kHz_in);
+ } else {
+ bp_params->adjusted_pixel_clock = 0;
+ BREAK_TO_DEBUGGER();
+ }
+
+ bp_params->reference_divider = params.sOutput.ucRefDiv;
+ bp_params->pixel_clock_post_divider = params.sOutput.ucPostDiv;
+
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** DAC ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac1_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+static enum bp_result dac2_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+
+static void init_dac_encoder_control(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1EncoderControl)) {
+ case 1:
+ bp->cmd_tbl.dac1_encoder_control = dac1_encoder_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac1_encoder_control = NULL;
+ break;
+ }
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2EncoderControl)) {
+ case 1:
+ bp->cmd_tbl.dac2_encoder_control = dac2_encoder_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac2_encoder_control = NULL;
+ break;
+ }
+}
+
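+/* Common parameter setup shared by the DAC1 and DAC2 encoder control
+ * calls below.
+ */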
+static void dac_encoder_control_prepare_params(
+ DAC_ENCODER_CONTROL_PS_ALLOCATION *params,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard)
+{
+ params->ucDacStandard = dac_standard;
+ if (enable)
+ params->ucAction = ATOM_ENABLE;
+ else
+ params->ucAction = ATOM_DISABLE;
+
+ /* We need to convert from KHz units into 10KHz units
+ * it looks as if TvControl does not care about the pixel clock
+ */
+ params->usPixelClock = cpu_to_le16((uint16_t)(pixel_clock / 10));
+}
+
+static enum bp_result dac1_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+ dac_encoder_control_prepare_params(
+ &params,
+ enable,
+ pixel_clock,
+ dac_standard);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC1EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac2_encoder_control_v1(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_ENCODER_CONTROL_PS_ALLOCATION params;
+
+ dac_encoder_control_prepare_params(
+ &params,
+ enable,
+ pixel_clock,
+ dac_standard);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC2EncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** DAC OUTPUT CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result dac1_output_control_v1(
+ struct bios_parser *bp,
+ bool enable);
+static enum bp_result dac2_output_control_v1(
+ struct bios_parser *bp,
+ bool enable);
+
+static void init_dac_output_control(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC1OutputControl)) {
+ case 1:
+ bp->cmd_tbl.dac1_output_control = dac1_output_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac1_output_control = NULL;
+ break;
+ }
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC2OutputControl)) {
+ case 1:
+ bp->cmd_tbl.dac2_output_control = dac2_output_control_v1;
+ break;
+ default:
+ bp->cmd_tbl.dac2_output_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result dac1_output_control_v1(
+ struct bios_parser *bp, bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
+
+ if (enable)
+ params.ucAction = ATOM_ENABLE;
+ else
+ params.ucAction = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(DAC1OutputControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac2_output_control_v1(
+ struct bios_parser *bp, bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION params;
+
+ if (enable)
+ params.ucAction = ATOM_ENABLE;
+ else
+ params.ucAction = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(DAC2OutputControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SET CRTC TIMING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+static enum bp_result set_crtc_timing_v1(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+
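+/* Prefer the DTD-based SetCRTC_UsingDTDTiming table when the VBIOS
+ * exposes revision 3; otherwise fall back to the legacy SetCRTC_Timing
+ * table.
+ */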
+static void init_set_crtc_timing(struct bios_parser *bp)
+{
+ uint32_t dtd_version =
+ BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_UsingDTDTiming);
+ if (dtd_version > 2)
+ switch (dtd_version) {
+ case 3:
+ bp->cmd_tbl.set_crtc_timing =
+ set_crtc_using_dtd_timing_v3;
+ break;
+ default:
+ bp->cmd_tbl.set_crtc_timing = NULL;
+ break;
+ }
+ else
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetCRTC_Timing)) {
+ case 1:
+ bp->cmd_tbl.set_crtc_timing = set_crtc_timing_v1;
+ break;
+ default:
+ bp->cmd_tbl.set_crtc_timing = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_crtc_timing_v1(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION params = {0};
+ uint8_t atom_controller_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+
+ params.usH_Total = cpu_to_le16((uint16_t)(bp_params->h_total));
+ params.usH_Disp = cpu_to_le16((uint16_t)(bp_params->h_addressable));
+ params.usH_SyncStart = cpu_to_le16((uint16_t)(bp_params->h_sync_start));
+ params.usH_SyncWidth = cpu_to_le16((uint16_t)(bp_params->h_sync_width));
+ params.usV_Total = cpu_to_le16((uint16_t)(bp_params->v_total));
+ params.usV_Disp = cpu_to_le16((uint16_t)(bp_params->v_addressable));
+ params.usV_SyncStart =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start));
+ params.usV_SyncWidth =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_width));
+
+ /* VBIOS does not expect any value other than zero in this call; for
+ * underscan, use the separate ProgramOverscan entry instead. With mode
+ * 1776x1000 and an overscan of 72x44 (i.e. 1920x1080) at 30 Hz DAL2 is
+ * fine, but with the same mode at 60 Hz there is corruption;
+ * DAL1 does not allow the mode 1776x1000@60.
+ */
+ params.ucOverscanRight = (uint8_t)bp_params->h_overscan_right;
+ params.ucOverscanLeft = (uint8_t)bp_params->h_overscan_left;
+ params.ucOverscanBottom = (uint8_t)bp_params->v_overscan_bottom;
+ params.ucOverscanTop = (uint8_t)bp_params->v_overscan_top;
+
+ if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+ if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+ if (bp_params->flags.INTERLACE) {
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+ /* original DAL code has this condition to apply this for
+ * non-TV/CV only due to complex MV testing for possible
+ * impact
+ * if (pACParameters->signal != SignalType_YPbPr &&
+ * pACParameters->signal != SignalType_Composite &&
+ * pACParameters->signal != SignalType_SVideo)
+ */
+ /* HW will deduct 0.5 line from the 2nd field.
+ * i.e. for 1080i, it is 2 lines for the 1st field, 2.5
+ * lines for the 2nd field. We need the input as 5 instead
+ * of 4, but it is 4 either from the EDID data
+ * (spec CEA 861) or the CEA timing table.
+ */
+ params.usV_SyncStart =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start + 1));
+ }
+
+ if (bp_params->flags.HORZ_COUNT_BY_TWO)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+ if (EXEC_BIOS_CMD_TABLE(SetCRTC_Timing, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SET_CRTC_USING_DTD_TIMING_PARAMETERS params = {0};
+ uint8_t atom_controller_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+
+ /* bios usH_Size wants h addressable size */
+ params.usH_Size = cpu_to_le16((uint16_t)bp_params->h_addressable);
+ /* bios usH_Blanking_Time wants borders included in blanking */
+ params.usH_Blanking_Time =
+ cpu_to_le16((uint16_t)(bp_params->h_total - bp_params->h_addressable));
+ /* bios usV_Size wants v addressable size */
+ params.usV_Size = cpu_to_le16((uint16_t)bp_params->v_addressable);
+ /* bios usV_Blanking_Time wants borders included in blanking */
+ params.usV_Blanking_Time =
+ cpu_to_le16((uint16_t)(bp_params->v_total - bp_params->v_addressable));
+ /* bios usHSyncOffset is the offset from the end of h addressable,
+ * our horizontalSyncStart is the offset from the beginning
+ * of h addressable */
+ params.usH_SyncOffset =
+ cpu_to_le16((uint16_t)(bp_params->h_sync_start - bp_params->h_addressable));
+ params.usH_SyncWidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
+ /* bios usVSyncOffset is the offset from the end of v addressable,
+ * our verticalSyncStart is the offset from the beginning of
+ * v addressable */
+ params.usV_SyncOffset =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start - bp_params->v_addressable));
+ params.usV_SyncWidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
+
+ /* we assume that overscan from original timing does not get bigger
+ * than 255
+ * we will program all the borders in the Set CRTC Overscan call below
+ */
+
+ if (0 == bp_params->flags.HSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_HSYNC_POLARITY);
+
+ if (0 == bp_params->flags.VSYNC_POSITIVE_POLARITY)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_VSYNC_POLARITY);
+
+ if (bp_params->flags.INTERLACE) {
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_INTERLACE);
+
+ /* original DAL code has this condition to apply this
+ * for non-TV/CV only
+ * due to complex MV testing for possible impact
+ * if ( pACParameters->signal != SignalType_YPbPr &&
+ * pACParameters->signal != SignalType_Composite &&
+ * pACParameters->signal != SignalType_SVideo)
+ */
+ {
+ /* HW will deduct 0.5 line from the 2nd field.
+  * i.e. for 1080i, it is 2 lines for the 1st field,
+  * 2.5 lines for the 2nd field. We need the input as 5
+  * instead of 4, but it is 4 either from EDID data
+  * (spec CEA 861) or the CEA timing table.
+  */
+ params.usV_SyncOffset =
+ cpu_to_le16(le16_to_cpu(params.usV_SyncOffset) + 1);
+
+ }
+ }
+
+ if (bp_params->flags.HORZ_COUNT_BY_TWO)
+ params.susModeMiscInfo.usAccess =
+ cpu_to_le16(le16_to_cpu(params.susModeMiscInfo.usAccess) | ATOM_DOUBLE_CLOCK_MODE);
+
+ if (EXEC_BIOS_CMD_TABLE(SetCRTC_UsingDTDTiming, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
+ case 2:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V2 params;
+ uint8_t atom_controller_id;
+ uint32_t atom_engine_id;
+ enum signal_type s = bp_params->signal;
+
+ memset(&params, 0, sizeof(params));
+
+ /* set controller id */
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+ else
+ return BP_RESULT_FAILURE;
+
+ /* set encoder id */
+ if (bp->cmd_helper->engine_bp_to_atom(
+ bp_params->engine_id, &atom_engine_id))
+ params.ucEncoderID = (uint8_t)atom_engine_id;
+ else
+ return BP_RESULT_FAILURE;
+
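+ /* Treat eDP, and DP driving an LVDS sink, as LVDS when deriving the
+  * encode mode passed to the VBIOS.
+  */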
+ if (SIGNAL_TYPE_EDP == s ||
+ (SIGNAL_TYPE_DISPLAY_PORT == s &&
+ SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+ s = SIGNAL_TYPE_LVDS;
+
+ params.ucEncodeMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ s, bp_params->enable_dp_audio);
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+ uint8_t atom_controller_id;
+ uint32_t atom_engine_id;
+ enum signal_type s = bp_params->signal;
+
+ memset(&params, 0, sizeof(params));
+
+ if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+ &atom_controller_id))
+ params.ucCRTC = atom_controller_id;
+ else
+ return result;
+
+ if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
+ &atom_engine_id))
+ params.ucEncoderID = (uint8_t)atom_engine_id;
+ else
+ return result;
+
+ if (SIGNAL_TYPE_EDP == s ||
+ (SIGNAL_TYPE_DISPLAY_PORT == s &&
+ SIGNAL_TYPE_LVDS == bp_params->sink_signal))
+ s = SIGNAL_TYPE_LVDS;
+
+ params.ucEncodeMode =
+ bp->cmd_helper->encoder_mode_bp_to_atom(
+ s, bp_params->enable_dp_audio);
+ /* Needed for VBIOS Random Spatial Dithering feature */
+ params.ucDstBpc = (uint8_t)(bp_params->display_output_bit_depth);
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE CRTC
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+
+static void init_enable_crtc(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTC)) {
+ case 1:
+ bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+ break;
+ default:
+ bp->cmd_tbl.enable_crtc = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ ENABLE_CRTC_PARAMETERS params = {0};
+ uint8_t id;
+
+ if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
+ params.ucCRTC = id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ if (enable)
+ params.ucEnable = ATOM_ENABLE;
+ else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableCRTC, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE CRTC MEM REQ
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_crtc_mem_req_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+
+static void init_enable_crtc_mem_req(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableCRTCMemReq)) {
+ case 1:
+ bp->cmd_tbl.enable_crtc_mem_req = enable_crtc_mem_req_v1;
+ break;
+ default:
+ bp->cmd_tbl.enable_crtc_mem_req = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_crtc_mem_req_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_BADINPUT;
+ ENABLE_CRTC_PARAMETERS params = {0};
+ uint8_t id;
+
+ if (bp->cmd_helper->controller_id_to_atom(controller_id, &id)) {
+ params.ucCRTC = id;
+
+ if (enable)
+ params.ucEnable = ATOM_ENABLE;
+ else
+ params.ucEnable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(EnableCRTCMemReq, params))
+ result = BP_RESULT_OK;
+ else
+ result = BP_RESULT_FAILURE;
+ }
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** DISPLAY PLL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result program_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+static enum bp_result program_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
+static void init_program_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetPixelClock)) {
+ case 5:
+ bp->cmd_tbl.program_clock = program_clock_v5;
+ break;
+ case 6:
+ bp->cmd_tbl.program_clock = program_clock_v6;
+ break;
+ default:
+ bp->cmd_tbl.program_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result program_clock_v5(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V5 params;
+ uint32_t atom_pll_id;
+
+ memset(&params, 0, sizeof(params));
+ if (!bp->cmd_helper->clock_source_id_to_atom(
+ bp_params->pll_id, &atom_pll_id)) {
+ BREAK_TO_DEBUGGER(); /* Invalid Input!! */
+ return BP_RESULT_BADINPUT;
+ }
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.sPCLKInput.ucPpll = (uint8_t) atom_pll_id;
+ params.sPCLKInput.usPixelClock =
+ cpu_to_le16((uint16_t) (bp_params->target_pixel_clock / 10));
+ params.sPCLKInput.ucCRTC = (uint8_t) ATOM_CRTC_INVALID;
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result program_clock_v6(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ SET_PIXEL_CLOCK_PS_ALLOCATION_V6 params;
+ uint32_t atom_pll_id;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!bp->cmd_helper->clock_source_id_to_atom(
+ bp_params->pll_id, &atom_pll_id)) {
+ BREAK_TO_DEBUGGER(); /* Invalid Input!! */
+ return BP_RESULT_BADINPUT;
+ }
+
+ /* We need to convert from KHz units into 10KHz units */
+ params.sPCLKInput.ucPpll = (uint8_t)atom_pll_id;
+ params.sPCLKInput.ulDispEngClkFreq =
+ cpu_to_le32(bp_params->target_pixel_clock / 10);
+
+ if (bp_params->flags.SET_EXTERNAL_REF_DIV_SRC)
+ params.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, params)) {
+ /* True display clock is returned by VBIOS if DFS bypass
+ * is enabled. */
+ bp_params->dfs_bypass_display_clock =
+ (uint32_t)(le32_to_cpu(params.sPCLKInput.ulDispEngClkFreq) * 10);
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** EXTERNAL ENCODER CONTROL
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+
+static void init_external_encoder_control(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(ExternalEncoderControl)) {
+ case 3:
+ bp->cmd_tbl.external_encoder_control =
+ external_encoder_control_v3;
+ break;
+ default:
+ bp->cmd_tbl.external_encoder_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ /* we need to use the _PS_Alloc struct */
+ EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 params;
+ EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 *cntl_params;
+ struct graphics_object_id encoder;
+ bool is_input_signal_dp = false;
+
+ memset(&params, 0, sizeof(params));
+
+ cntl_params = &params.sExtEncoder;
+
+ encoder = cntl->encoder_id;
+
+ /* check if encoder supports external encoder control table */
+ switch (dal_graphics_object_id_get_encoder_id(encoder)) {
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ is_input_signal_dp = true;
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER();
+ return BP_RESULT_BADINPUT;
+ }
+
+ /* Fill information based on the action
+ *
+ * Bit[6:4]: indicate external encoder, applied to all functions.
+ * =0: external encoder1, mapped to external encoder enum id1
+ * =1: external encoder2, mapped to external encoder enum id2
+ *
+ * enum ObjectEnumId
+ * {
+ * EnumId_Unknown = 0,
+ * EnumId_1,
+ * EnumId_2,
+ * };
+ */
+ cntl_params->ucConfig = (uint8_t)((encoder.enum_id - 1) << 4);
+
+ switch (cntl->action) {
+ case EXTERNAL_ENCODER_CONTROL_INIT:
+ /* output display connector type. Only valid in encoder
+ * initialization */
+ cntl_params->usConnectorId =
+ cpu_to_le16((uint16_t)cntl->connector_obj_id.id);
+ break;
+ case EXTERNAL_ENCODER_CONTROL_SETUP:
+ /* Output display device pixel clock frequency, in units of 10 kHz
+  * (EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 uses 10 kHz units).
+  * Only valid in setup and enable-output.
+  */
+ cntl_params->usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ /* Indicate the display output signal type driven by the external
+  * encoder; only valid in setup and enable-output */
+ cntl_params->ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal, false);
+
+ if (is_input_signal_dp) {
+ /* Bit[0]: indicates the link rate, =1: 2.7 GHz, =0: 1.62 GHz;
+  * only valid in encoder setup with DP mode. */
+ if (LINK_RATE_HIGH == cntl->link_rate)
+ cntl_params->ucConfig |= 1;
+ /* Output color depth: indicates the encoder data bpc format
+  * in DP mode; only valid in encoder setup in DP mode.
+  */
+ cntl_params->ucBitPerColor =
+ (uint8_t)(cntl->color_depth);
+ }
+ /* Indicate how many lanes are used by the external encoder;
+  * only valid in encoder setup and enable-output. */
+ cntl_params->ucLaneNum = (uint8_t)(cntl->lanes_number);
+ break;
+ case EXTERNAL_ENCODER_CONTROL_ENABLE:
+ cntl_params->usPixelClock =
+ cpu_to_le16((uint16_t)(cntl->pixel_clock / 10));
+ cntl_params->ucEncoderMode =
+ (uint8_t)bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal, false);
+ cntl_params->ucLaneNum = (uint8_t)cntl->lanes_number;
+ break;
+ default:
+ break;
+ }
+
+ cntl_params->ucAction = (uint8_t)cntl->action;
+
+ if (EXEC_BIOS_CMD_TABLE(ExternalEncoderControl, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** ENABLE DISPLAY POWER GATING
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+
+static void init_enable_disp_power_gating(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(EnableDispPowerGating)) {
+ case 1:
+ bp->cmd_tbl.enable_disp_power_gating =
+ enable_disp_power_gating_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.enable_disp_power_gating = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION params = {0};
+ uint8_t atom_crtc_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
+ params.ucDispPipeId = atom_crtc_id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ params.ucEnable =
+ bp->cmd_helper->disp_power_gating_action_to_atom(action);
+
+ if (EXEC_BIOS_CMD_TABLE(EnableDispPowerGating, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
+ ** SET DCE CLOCK
+ **
+ ********************************************************************************
+ *******************************************************************************/
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+
+static void init_set_dce_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SetDCEClock)) {
+ case 1:
+ bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.set_dce_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ SET_DCE_CLOCK_PS_ALLOCATION_V2_1 params;
+ uint32_t atom_pll_id;
+ uint32_t atom_clock_type;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
+ !cmd->dc_clock_type_to_atom(bp_params->clock_type, &atom_clock_type))
+ return BP_RESULT_BADINPUT;
+
+ params.asParam.ucDCEClkSrc = atom_pll_id;
+ params.asParam.ucDCEClkType = atom_clock_type;
+
+ if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
+ if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
+
+ if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
+
+ if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
+
+ if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
+ params.asParam.ucDCEClkFlag |= DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
+ } else
+ /* only program clock frequency if display clock is used; VBIOS will program DPREFCLK */
+ /* We need to convert from KHz units into 10KHz units */
+ params.asParam.ulDCEClkFreq = cpu_to_le32(bp_params->target_clock_frequency / 10);
+
+ if (EXEC_BIOS_CMD_TABLE(SetDCEClock, params)) {
+ /* Convert from 10KHz units back to KHz */
+ bp_params->target_clock_frequency = le32_to_cpu(params.asParam.ulDCEClkFreq) * 10;
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
new file mode 100644
index 000000000000..94f3d43a7471
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_H__
+#define __DAL_COMMAND_TABLE_H__
+
+struct bios_parser;
+struct bp_encoder_control;
+
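+/* Table of VBIOS command-table wrappers. Entries are bound at init time
+ * according to the parameter revision reported by the VBIOS and may be
+ * NULL when a revision is not supported.
+ */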
+struct cmd_tbl {
+ enum bp_result (*dig_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig1)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig2)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*transmitter_control)(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *control);
+ enum bp_result (*set_pixel_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+ enum bp_result (*adjust_display_pll)(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*dac1_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac2_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac1_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*dac2_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*set_crtc_timing)(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+ enum bp_result (*enable_crtc)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*enable_crtc_mem_req)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*program_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*external_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+ enum bp_result (*enable_disp_power_gating)(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+ enum bp_result (*set_dce_clock)(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+};
+
+void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
new file mode 100644
index 000000000000..ba68693758a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -0,0 +1,812 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "include/bios_parser_interface.h"
+
+#include "command_table2.h"
+#include "command_table_helper2.h"
+#include "bios_parser_helper.h"
+#include "bios_parser_types_internal2.h"
+
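+/* Compute the index of a function entry in the ATOM master command table:
+ * take the byte offset of FieldName within the master list structure
+ * (an offsetof-style cast on a NULL pointer) and divide by the size of a
+ * table entry (uint16_t).
+ */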
+#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
+ (((char *)(&((\
+ struct atom_master_list_of_##MasterOrData##_functions_v2_1 *)0)\
+ ->FieldName)-(char *)0)/sizeof(uint16_t))
+
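+/* Execute a VBIOS command table through the CGS interface; a return value
+ * of zero from cgs_atom_exec_cmd_table indicates success.
+ */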
+#define EXEC_BIOS_CMD_TABLE(fname, params)\
+ (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname), \
+ &params) == 0)
+
+#define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\
+ cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev)
+
+#define BIOS_CMD_TABLE_PARA_REVISION(fname)\
+ bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \
+ GET_INDEX_INTO_MASTER_TABLE(command, fname))
+
+static void init_dig_encoder_control(struct bios_parser *bp);
+static void init_transmitter_control(struct bios_parser *bp);
+static void init_set_pixel_clock(struct bios_parser *bp);
+
+static void init_set_crtc_timing(struct bios_parser *bp);
+
+static void init_select_crtc_source(struct bios_parser *bp);
+static void init_enable_crtc(struct bios_parser *bp);
+
+static void init_external_encoder_control(struct bios_parser *bp);
+static void init_enable_disp_power_gating(struct bios_parser *bp);
+static void init_set_dce_clock(struct bios_parser *bp);
+static void init_get_smu_clock_info(struct bios_parser *bp);
+
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp)
+{
+ init_dig_encoder_control(bp);
+ init_transmitter_control(bp);
+ init_set_pixel_clock(bp);
+
+ init_set_crtc_timing(bp);
+
+ init_select_crtc_source(bp);
+ init_enable_crtc(bp);
+
+ init_external_encoder_control(bp);
+ init_enable_disp_power_gating(bp);
+ init_set_dce_clock(bp);
+ init_get_smu_clock_info(bp);
+}
+
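+/* Return the parameter (content) revision of the given command table, or 0
+ * if the revision cannot be read.
+ */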
+static uint32_t bios_cmd_table_para_revision(void *cgs_device,
+ uint32_t index)
+{
+ uint8_t frev, crev;
+
+ if (cgs_atom_get_cmd_table_revs(cgs_device,
+ index,
+ &frev, &crev) != 0)
+ return 0;
+ return crev;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** D I G E N C O D E R C O N T R O L
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result encoder_control_digx_v1_5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl);
+
+static void init_dig_encoder_control(struct bios_parser *bp)
+{
+ uint32_t version =
+ BIOS_CMD_TABLE_PARA_REVISION(digxencodercontrol);
+
+ switch (version) {
+ case 5:
+ bp->cmd_tbl.dig_encoder_control = encoder_control_digx_v1_5;
+ break;
+ default:
+ bp->cmd_tbl.dig_encoder_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result encoder_control_digx_v1_5(
+ struct bios_parser *bp,
+ struct bp_encoder_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct dig_encoder_stream_setup_parameters_v1_5 params = {0};
+
+ params.digid = (uint8_t)(cntl->engine_id);
+ params.action = bp->cmd_helper->encoder_action_to_atom(cntl->action);
+
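+ /* cntl->pixel_clock is in kHz; the VBIOS expects 10 kHz units */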
+ params.pclk_10khz = cntl->pixel_clock / 10;
+ params.digmode =
+ (uint8_t)(bp->cmd_helper->encoder_mode_bp_to_atom(
+ cntl->signal,
+ cntl->enable_dp_audio));
+ params.lanenum = (uint8_t)(cntl->lanes_number);
+
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_888:
+ params.bitpercolor = PANEL_8BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ params.bitpercolor = PANEL_10BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ params.bitpercolor = PANEL_12BIT_PER_COLOR;
+ break;
+ case COLOR_DEPTH_161616:
+ params.bitpercolor = PANEL_16BIT_PER_COLOR;
+ break;
+ default:
+ break;
+ }
+
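+ /* For HDMI deep color, scale the 10 kHz pixel clock by the
+  * bits-per-pixel ratio (30/24, 36/24 or 48/24).
+  */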
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.pclk_10khz =
+ (params.pclk_10khz * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ params.pclk_10khz =
+ (params.pclk_10khz * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ params.pclk_10khz =
+ (params.pclk_10khz * 48) / 24;
+ break;
+ default:
+ break;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*****************************************************************************
+ ******************************************************************************
+ **
+ ** TRANSMITTER CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl);
+
+static void init_transmitter_control(struct bios_parser *bp)
+{
+ uint8_t frev;
+ uint8_t crev;
+
+ if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) != 0)
+ BREAK_TO_DEBUGGER();
+ switch (crev) {
+ case 6:
+ bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
+ break;
+ default:
+ bp->cmd_tbl.transmitter_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result transmitter_control_v1_6(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+ struct dig_transmitter_control_ps_allocation_v1_6 ps = { { 0 } };
+
+ ps.param.phyid = cmd->phy_id_to_atom(cntl->transmitter);
+ ps.param.action = (uint8_t)cntl->action;
+
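+ /* For the voltage/pre-emphasis action, mode_laneset carries the DP
+  * lane settings; otherwise it carries the DIG mode derived from the
+  * signal type.
+  */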
+ if (cntl->action == TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS)
+ ps.param.mode_laneset.dplaneset = (uint8_t)cntl->lane_settings;
+ else
+ ps.param.mode_laneset.digmode =
+ cmd->signal_type_to_atom_dig_mode(cntl->signal);
+
+ ps.param.lanenum = (uint8_t)cntl->lanes_number;
+ ps.param.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
+ ps.param.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
+ ps.param.connobj_id = (uint8_t)cntl->connector_obj_id.id;
+ ps.param.symclk_10khz = cntl->pixel_clock/10;
+
+
+ if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
+ cntl->action == TRANSMITTER_CONTROL_ACTIAVATE ||
+ cntl->action == TRANSMITTER_CONTROL_DEACTIVATE) {
+ dm_logger_write(bp->base.ctx->logger, LOG_BIOS,\
+ "%s:ps.param.symclk_10khz = %d\n",\
+ __func__, ps.param.symclk_10khz);
+ }
+
+
+/* color_depth is not used any more; the driver has the deep color factor in the Phyclk */
+ if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps))
+ result = BP_RESULT_OK;
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SET PIXEL CLOCK
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+
+static void init_set_pixel_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) {
+ case 7:
+ bp->cmd_tbl.set_pixel_clock = set_pixel_clock_v7;
+ break;
+ default:
+ bp->cmd_tbl.set_pixel_clock = NULL;
+ break;
+ }
+}
+
+
+
+static enum bp_result set_pixel_clock_v7(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct set_pixel_clock_parameter_v1_7 clk;
+ uint8_t controller_id;
+ uint32_t pll_id;
+
+ memset(&clk, 0, sizeof(clk));
+
+ if (bp->cmd_helper->clock_source_id_to_atom(bp_params->pll_id, &pll_id)
+ && bp->cmd_helper->controller_id_to_atom(bp_params->
+ controller_id, &controller_id)) {
+ /* Note: VBIOS still wants to use the ucCRTC name, which is now
+  * one byte inside a ULONG:
+  * typedef struct _CRTC_PIXEL_CLOCK_FREQ
+  * {
+  *   ULONG ulPixelClock:24; target pixel clock to drive the CRTC
+  *                          timing; 0 means disable PPLL/DCPLL.
+  *                          Expanded to 24 bits compared to the
+  *                          previous version.
+  *   ULONG ucCRTC:8;        ATOM_CRTC1~6, indicates the CRTC
+  *                          controller to drive the pixel clock;
+  *                          not used for the DCPLL case.
+  * } CRTC_PIXEL_CLOCK_FREQ;
+  * union
+  * {
+  *   CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq; pixel clock and CRTC id
+  *   ULONG ulDispEngClkFreq;               dispclk frequency
+  * };
+  */
+ clk.crtc_id = controller_id;
+ clk.pll_id = (uint8_t) pll_id;
+ clk.encoderobjid =
+ bp->cmd_helper->encoder_id_to_atom(
+ dal_graphics_object_id_get_encoder_id(
+ bp_params->encoder_object_id));
+
+ clk.encoder_mode = (uint8_t) bp->
+ cmd_helper->encoder_mode_bp_to_atom(
+ bp_params->signal_type, false);
+
+ /* We need to convert from kHz units into 100 Hz units */
+ clk.pixclk_100hz = cpu_to_le32(bp_params->target_pixel_clock *
+ 10);
+
+ clk.deep_color_ratio =
+ (uint8_t) bp->cmd_helper->
+ transmitter_color_depth_to_atom(
+ bp_params->color_depth);
+ dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
+ "%s: program display clock = %d, colorDepth = %d\n",
+ __func__, bp_params->target_pixel_clock,
+ bp_params->color_depth);
+
+ if (bp_params->flags.FORCE_PROGRAMMING_OF_PLL)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_FORCE_PROG_PPLL;
+
+ if (bp_params->flags.PROGRAM_PHY_PLL_ONLY)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_PROG_PHYPLL;
+
+ if (bp_params->flags.SUPPORT_YUV_420)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_YUV420_MODE;
+
+ if (bp_params->flags.SET_XTALIN_REF_SRC)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_XTALIN;
+
+ if (bp_params->flags.SET_GENLOCK_REF_DIV_SRC)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_REF_DIV_SRC_GENLK;
+
+ if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
+ clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
+
+ if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk))
+ result = BP_RESULT_OK;
+ }
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SET CRTC TIMING
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+
+static void init_set_crtc_timing(struct bios_parser *bp)
+{
+ uint32_t dtd_version =
+ BIOS_CMD_TABLE_PARA_REVISION(setcrtc_usingdtdtiming);
+
+ switch (dtd_version) {
+ case 3:
+ bp->cmd_tbl.set_crtc_timing =
+ set_crtc_using_dtd_timing_v3;
+ break;
+ default:
+ bp->cmd_tbl.set_crtc_timing = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_crtc_using_dtd_timing_v3(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct set_crtc_using_dtd_timing_parameters params = {0};
+ uint8_t atom_controller_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(
+ bp_params->controller_id, &atom_controller_id))
+ params.crtc_id = atom_controller_id;
+
+ /* bios usH_Size wants h addressable size */
+ params.h_size = cpu_to_le16((uint16_t)bp_params->h_addressable);
+ /* bios usH_Blanking_Time wants borders included in blanking */
+ params.h_blanking_time =
+ cpu_to_le16((uint16_t)(bp_params->h_total -
+ bp_params->h_addressable));
+ /* bios usV_Size wants v addressable size */
+ params.v_size = cpu_to_le16((uint16_t)bp_params->v_addressable);
+ /* bios usV_Blanking_Time wants borders included in blanking */
+ params.v_blanking_time =
+ cpu_to_le16((uint16_t)(bp_params->v_total -
+ bp_params->v_addressable));
+ /* bios usHSyncOffset is the offset from the end of h addressable,
+ * our horizontalSyncStart is the offset from the beginning
+ * of h addressable
+ */
+ params.h_syncoffset =
+ cpu_to_le16((uint16_t)(bp_params->h_sync_start -
+ bp_params->h_addressable));
+ params.h_syncwidth = cpu_to_le16((uint16_t)bp_params->h_sync_width);
+ /* bios usVSyncOffset is the offset from the end of v addressable,
+ * our verticalSyncStart is the offset from the beginning of
+ * v addressable
+ */
+ params.v_syncoffset =
+ cpu_to_le16((uint16_t)(bp_params->v_sync_start -
+ bp_params->v_addressable));
+ params.v_syncwidth = cpu_to_le16((uint16_t)bp_params->v_sync_width);
+
+ /* we assume that overscan from original timing does not get bigger
+ * than 255
+ * we will program all the borders in the Set CRTC Overscan call below
+ */
+
+ if (bp_params->flags.HSYNC_POSITIVE_POLARITY == 0)
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ ATOM_HSYNC_POLARITY);
+
+ if (bp_params->flags.VSYNC_POSITIVE_POLARITY == 0)
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ ATOM_VSYNC_POLARITY);
+
+ if (bp_params->flags.INTERLACE) {
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ ATOM_INTERLACE);
+
+ /* original DAL code has this condition to apply this
+ * for non-TV/CV only
+ * due to complex MV testing for possible impact
+ * if ( pACParameters->signal != SignalType_YPbPr &&
+ * pACParameters->signal != SignalType_Composite &&
+ * pACParameters->signal != SignalType_SVideo)
+ */
+ {
+ /* HW will deduct 0.5 line from the 2nd field.
+  * i.e. for 1080i, it is 2 lines for the 1st field,
+  * 2.5 lines for the 2nd field. We need the input as 5
+  * instead of 4, but it is 4 either from EDID data
+  * (spec CEA 861) or the CEA timing table.
+  */
+ params.v_syncoffset =
+ cpu_to_le16(le16_to_cpu(params.v_syncoffset) +
+ 1);
+
+ }
+ }
+
+ if (bp_params->flags.HORZ_COUNT_BY_TWO)
+ params.modemiscinfo =
+ cpu_to_le16(le16_to_cpu(params.modemiscinfo) |
+ 0x100); /* ATOM_DOUBLE_CLOCK_MODE */
+
+ if (EXEC_BIOS_CMD_TABLE(setcrtc_usingdtdtiming, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SELECT CRTC SOURCE
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(selectcrtc_source)) {
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct select_crtc_source_parameters_v2_3 params;
+ uint8_t atom_controller_id;
+ uint32_t atom_engine_id;
+ enum signal_type s = bp_params->signal;
+
+ memset(&params, 0, sizeof(params));
+
+ if (bp->cmd_helper->controller_id_to_atom(bp_params->controller_id,
+ &atom_controller_id))
+ params.crtc_id = atom_controller_id;
+ else
+ return result;
+
+ if (bp->cmd_helper->engine_bp_to_atom(bp_params->engine_id,
+ &atom_engine_id))
+ params.encoder_id = (uint8_t)atom_engine_id;
+ else
+ return result;
+
+ if (s == SIGNAL_TYPE_EDP ||
+ (s == SIGNAL_TYPE_DISPLAY_PORT && bp_params->sink_signal ==
+ SIGNAL_TYPE_LVDS))
+ s = SIGNAL_TYPE_LVDS;
+
+ params.encode_mode =
+ bp->cmd_helper->encoder_mode_bp_to_atom(
+ s, bp_params->enable_dp_audio);
+ /* Needed for VBIOS Random Spatial Dithering feature */
+ params.dst_bpc = (uint8_t)(bp_params->display_output_bit_depth);
+
+ if (EXEC_BIOS_CMD_TABLE(selectcrtc_source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** ENABLE CRTC
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+
+static void init_enable_crtc(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(enablecrtc)) {
+ case 1:
+ bp->cmd_tbl.enable_crtc = enable_crtc_v1;
+ break;
+ default:
+ bp->cmd_tbl.enable_crtc = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_crtc_v1(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ struct enable_crtc_parameters params = {0};
+ uint8_t id;
+
+ if (bp->cmd_helper->controller_id_to_atom(controller_id, &id))
+ params.crtc_id = id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ if (enable)
+ params.enable = ATOM_ENABLE;
+ else
+ params.enable = ATOM_DISABLE;
+
+ if (EXEC_BIOS_CMD_TABLE(enablecrtc, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** DISPLAY PLL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** EXTERNAL ENCODER CONTROL
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+
+static void init_external_encoder_control(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(externalencodercontrol)) {
+ case 3:
+ bp->cmd_tbl.external_encoder_control =
+ external_encoder_control_v3;
+ break;
+ default:
+ bp->cmd_tbl.external_encoder_control = NULL;
+ break;
+ }
+}
+
+static enum bp_result external_encoder_control_v3(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl)
+{
+ /* TODO */
+ return BP_RESULT_OK;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** ENABLE DISPLAY POWER GATING
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+
+static void init_enable_disp_power_gating(
+ struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)) {
+ case 1:
+ bp->cmd_tbl.enable_disp_power_gating =
+ enable_disp_power_gating_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.enable_disp_power_gating = NULL;
+ break;
+ }
+}
+
+static enum bp_result enable_disp_power_gating_v2_1(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+
+ struct enable_disp_power_gating_ps_allocation ps = { { 0 } };
+ uint8_t atom_crtc_id;
+
+ if (bp->cmd_helper->controller_id_to_atom(crtc_id, &atom_crtc_id))
+ ps.param.disp_pipe_id = atom_crtc_id;
+ else
+ return BP_RESULT_BADINPUT;
+
+ ps.param.enable =
+ bp->cmd_helper->disp_power_gating_action_to_atom(action);
+
+ if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** SET DCE CLOCK
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+
+static void init_set_dce_clock(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(setdceclock)) {
+ case 1:
+ bp->cmd_tbl.set_dce_clock = set_dce_clock_v2_1;
+ break;
+ default:
+ bp->cmd_tbl.set_dce_clock = NULL;
+ break;
+ }
+}
+
+static enum bp_result set_dce_clock_v2_1(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+
+ struct set_dce_clock_ps_allocation_v2_1 params;
+ uint32_t atom_pll_id;
+ uint32_t atom_clock_type;
+ const struct command_table_helper *cmd = bp->cmd_helper;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!cmd->clock_source_id_to_atom(bp_params->pll_id, &atom_pll_id) ||
+ !cmd->dc_clock_type_to_atom(bp_params->clock_type,
+ &atom_clock_type))
+ return BP_RESULT_BADINPUT;
+
+ params.param.dceclksrc = atom_pll_id;
+ params.param.dceclktype = atom_clock_type;
+
+ if (bp_params->clock_type == DCECLOCK_TYPE_DPREFCLK) {
+ if (bp_params->flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENLK;
+
+ if (bp_params->flags.USE_PCIE_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_PCIE;
+
+ if (bp_params->flags.USE_XTALIN_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_XTALIN;
+
+ if (bp_params->flags.USE_GENERICA_AS_SOURCE_FOR_DPREFCLK)
+ params.param.dceclkflag |=
+ DCE_CLOCK_FLAG_PLL_REFCLK_SRC_GENERICA;
+ } else
+ /* only program clock frequency if display clock is used;
+ * VBIOS will program DPREFCLK
+ * We need to convert from KHz units into 10KHz units
+ */
+ params.param.dceclk_10khz = cpu_to_le32(
+ bp_params->target_clock_frequency / 10);
+ dm_logger_write(bp->base.ctx->logger, LOG_BIOS,
+ "%s: target_clock_frequency = %d, clock_type = %d\n",
+ __func__, bp_params->target_clock_frequency,
+ bp_params->clock_type);
+
+ if (EXEC_BIOS_CMD_TABLE(setdceclock, params)) {
+ /* Convert from 10KHz units back to KHz */
+ bp_params->target_clock_frequency = le32_to_cpu(
+ params.param.dceclk_10khz) * 10;
+ result = BP_RESULT_OK;
+ }
+
+ return result;
+}
+
+
+/******************************************************************************
+ ******************************************************************************
+ **
+ ** GET SMU CLOCK INFO
+ **
+ ******************************************************************************
+ *****************************************************************************/
+
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp);
+
+static void init_get_smu_clock_info(struct bios_parser *bp)
+{
+ /* TODO: add switch for table version */
+ bp->cmd_tbl.get_smu_clock_info = get_smu_clock_info_v3_1;
+
+}
+
+static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp)
+{
+ struct atom_get_smu_clock_info_parameters_v3_1 smu_input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 smu_output;
+
+ smu_input.command = GET_SMU_CLOCK_INFO_V3_1_GET_PLLVCO_FREQ;
+
+ /* Get Specific Clock */
+ if (EXEC_BIOS_CMD_TABLE(getsmuclockinfo, smu_input)) {
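+ /* The VBIOS writes its result back into the parameter buffer,
+  * so copy it into the output structure before reading it.
+  */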
+ memmove(&smu_output, &smu_input, sizeof(
+ struct atom_get_smu_clock_info_parameters_v3_1));
+ return smu_output.atom_smu_outputclkfreq.syspllvcofreq_10khz;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
new file mode 100644
index 000000000000..59061b806df5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE2_H__
+#define __DAL_COMMAND_TABLE2_H__
+
+struct bios_parser;
+struct bp_encoder_control;
+
+struct cmd_tbl {
+ enum bp_result (*dig_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig1)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*encoder_control_dig2)(
+ struct bios_parser *bp,
+ struct bp_encoder_control *control);
+ enum bp_result (*transmitter_control)(
+ struct bios_parser *bp,
+ struct bp_transmitter_control *control);
+ enum bp_result (*set_pixel_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct bios_parser *bp,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+ enum bp_result (*adjust_display_pll)(
+ struct bios_parser *bp,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*dac1_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac2_encoder_control)(
+ struct bios_parser *bp,
+ bool enable,
+ uint32_t pixel_clock,
+ uint8_t dac_standard);
+ enum bp_result (*dac1_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*dac2_output_control)(
+ struct bios_parser *bp,
+ bool enable);
+ enum bp_result (*set_crtc_timing)(
+ struct bios_parser *bp,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+ enum bp_result (*enable_crtc)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*enable_crtc_mem_req)(
+ struct bios_parser *bp,
+ enum controller_id controller_id,
+ bool enable);
+ enum bp_result (*program_clock)(
+ struct bios_parser *bp,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*external_encoder_control)(
+ struct bios_parser *bp,
+ struct bp_external_encoder_control *cntl);
+ enum bp_result (*enable_disp_power_gating)(
+ struct bios_parser *bp,
+ enum controller_id crtc_id,
+ enum bp_pipe_control_action action);
+ enum bp_result (*set_dce_clock)(
+ struct bios_parser *bp,
+ struct bp_set_dce_clock_parameters *bp_params);
+ unsigned int (*get_smu_clock_info)(
+ struct bios_parser *bp);
+
+};
+
+void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
new file mode 100644
index 000000000000..2979358c6a55
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "command_table_helper.h"
+
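+/* Select the DCE-generation-specific helper table used to translate driver
+ * enums into ATOM BIOS values.
+ */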
+bool dal_bios_parser_init_cmd_tbl_helper(
+ const struct command_table_helper **h,
+ enum dce_version dce)
+{
+ switch (dce) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ *h = dal_cmd_tbl_helper_dce80_get_table();
+ return true;
+
+ case DCE_VERSION_10_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_2:
+ *h = dal_cmd_tbl_helper_dce112_get_table();
+ return true;
+
+ default:
+ /* Unsupported DCE */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/* real implementations */
+
+bool dal_cmd_table_helper_controller_id_to_atom(
+ enum controller_id id,
+ uint8_t *atom_id)
+{
+ if (atom_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CONTROLLER_ID_D0:
+ *atom_id = ATOM_CRTC1;
+ return true;
+ case CONTROLLER_ID_D1:
+ *atom_id = ATOM_CRTC2;
+ return true;
+ case CONTROLLER_ID_D2:
+ *atom_id = ATOM_CRTC3;
+ return true;
+ case CONTROLLER_ID_D3:
+ *atom_id = ATOM_CRTC4;
+ return true;
+ case CONTROLLER_ID_D4:
+ *atom_id = ATOM_CRTC5;
+ return true;
+ case CONTROLLER_ID_D5:
+ *atom_id = ATOM_CRTC6;
+ return true;
+ case CONTROLLER_ID_UNDERLAY0:
+ *atom_id = ATOM_UNDERLAY_PIPE0;
+ return true;
+ case CONTROLLER_ID_UNDEFINED:
+ *atom_id = ATOM_CRTC_INVALID;
+ return true;
+ default:
+ /* Wrong controller id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/**
+ * dal_cmd_table_helper_transmitter_bp_to_atom
+ *
+ * @brief
+ * Translate the transmitter to the corresponding ATOM BIOS value
+ *
+ * @param t: input transmitter
+ * @return digital transmitter select
+ *  =00: Digital Transmitter1 ( UNIPHY linkAB )
+ *  =01: Digital Transmitter2 ( UNIPHY linkCD )
+ *  =02: Digital Transmitter3 ( UNIPHY linkEF )
+ */
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+ enum transmitter t)
+{
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_TRAVIS_LCD:
+ return 0;
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ return 1;
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ return 2;
+ default:
+ /* Invalid Transmitter Type! */
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+}
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+ enum signal_type s,
+ bool enable_dp_audio)
+{
+ switch (s) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return ATOM_ENCODER_MODE_DVI;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return ATOM_ENCODER_MODE_HDMI;
+ case SIGNAL_TYPE_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_VIRTUAL:
+ if (enable_dp_audio)
+ return ATOM_ENCODER_MODE_DP_AUDIO;
+ else
+ return ATOM_ENCODER_MODE_DP;
+ case SIGNAL_TYPE_RGB:
+ return ATOM_ENCODER_MODE_CRT;
+ default:
+ return ATOM_ENCODER_MODE_CRT;
+ }
+}
+
+void dal_cmd_table_helper_assign_control_parameter(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param)
+{
+ /* There are three transmitter blocks, each with two 4-lane links:
+  * A+B, C+D and E+F. Uniphy A, C and E are enumerated as link 0 in
+  * each block, and B, D and F as link 1. The third transmitter block
+  * has non-splittable links (UniphyE and UniphyF cannot be configured
+  * separately to drive two different streams).
+  */
+ if ((control->transmitter == TRANSMITTER_UNIPHY_B) ||
+ (control->transmitter == TRANSMITTER_UNIPHY_D) ||
+ (control->transmitter == TRANSMITTER_UNIPHY_F)) {
+ /* Bit2: Link Select
+ * =0: PHY linkA/C/E
+ * =1: PHY linkB/D/F
+ */
+ ctrl_param->acConfig.ucLinkSel = 1;
+ }
+
+ /* Bit[4:3]: Transmitter Selection
+ * =00: Digital Transmitter1 ( UNIPHY linkAB )
+ * =01: Digital Transmitter2 ( UNIPHY linkCD )
+ * =02: Digital Transmitter3 ( UNIPHY linkEF )
+ * =03: Reserved
+ */
+ ctrl_param->acConfig.ucTransmitterSel =
+ (uint8_t)(h->transmitter_bp_to_atom(control->transmitter));
+
+ ctrl_param->ucAction = h->encoder_action_to_atom(control->action);
+ /* We need to convert from KHz units into 10KHz units */
+ ctrl_param->usPixelClock = cpu_to_le16((uint16_t)(control->pixel_clock / 10));
+ ctrl_param->ucEncoderMode =
+ (uint8_t)(h->encoder_mode_bp_to_atom(
+ control->signal, control->enable_dp_audio));
+ ctrl_param->ucLaneNum = (uint8_t)(control->lanes_number);
+}
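+
+/* Illustrative sketch (not part of this patch): a minimal, hypothetical
+ * caller of the helper above, using only the fields the function actually
+ * reads. The concrete values (UNIPHY B driving HDMI at a 148500 kHz pixel
+ * clock over 4 lanes) and the "example_" names are assumptions for
+ * illustration only.
+ */
+static void example_assign_control_parameter(
+ const struct command_table_helper *example_h)
+{
+ struct bp_encoder_control control = {
+ .action = ENCODER_CONTROL_ENABLE,
+ .transmitter = TRANSMITTER_UNIPHY_B, /* link 1 of block A+B */
+ .signal = SIGNAL_TYPE_HDMI_TYPE_A,
+ .enable_dp_audio = false,
+ .pixel_clock = 148500, /* kHz; stored as 14850 in 10 kHz units */
+ .lanes_number = 4,
+ };
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 ctrl_param = {0};
+
+ dal_cmd_table_helper_assign_control_parameter(example_h,
+ &control, &ctrl_param);
+}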
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id)
+{
+ if (ref_clk_src_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL1:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
+ return true;
+ case CLOCK_SOURCE_ID_PLL2:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
+ return true;
+ case CLOCK_SOURCE_ID_DCPLL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
+ return true;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
+ return true;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
+ return true;
+ default:
+ /* Unsupported clock source id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+ enum encoder_id id)
+{
+ switch (id) {
+ case ENCODER_ID_INTERNAL_LVDS:
+ return ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ case ENCODER_ID_INTERNAL_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ case ENCODER_ID_INTERNAL_TMDS2:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
+ case ENCODER_ID_INTERNAL_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ case ENCODER_ID_INTERNAL_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ case ENCODER_ID_INTERNAL_LVTM1:
+ return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ case ENCODER_ID_INTERNAL_HDMI:
+ return ENCODER_OBJECT_ID_HDMI_INTERNAL;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return ENCODER_OBJECT_ID_TRAVIS;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ return ENCODER_OBJECT_ID_NUTMEG;
+ case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+ return ENCODER_OBJECT_ID_MVPU_FPGA;
+ case ENCODER_ID_INTERNAL_DDI:
+ return ENCODER_OBJECT_ID_INTERNAL_DDI;
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
+ case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
+ case ENCODER_ID_INTERNAL_WIRELESS:
+ return ENCODER_OBJECT_ID_INTERNAL_VCE;
+ case ENCODER_ID_UNKNOWN:
+ return ENCODER_OBJECT_ID_NONE;
+ default:
+ /* Invalid encoder id */
+ BREAK_TO_DEBUGGER();
+ return ENCODER_OBJECT_ID_NONE;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
new file mode 100644
index 000000000000..1fab634b66be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_H__
+#define __DAL_COMMAND_TABLE_HELPER_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper_dce112.h"
+
+struct command_table_helper {
+ bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+ uint8_t (*encoder_action_to_atom)(
+ enum bp_encoder_control_action action);
+ uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+ bool enable_dp_audio);
+ bool (*engine_bp_to_atom)(enum engine_id engine_id,
+ uint32_t *atom_engine_id);
+ void (*assign_control_parameter)(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+ bool (*clock_source_id_to_atom)(enum clock_source_id id,
+ uint32_t *atom_pll_id);
+ bool (*clock_source_id_to_ref_clk_src)(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+ uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+ uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+ uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+ enum clock_source_id id);
+ uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+ uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+ uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+ uint8_t (*phy_id_to_atom)(enum transmitter t);
+ uint8_t (*disp_power_gating_action_to_atom)(
+ enum bp_pipe_control_action action);
+ bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type);
+ uint8_t (*transmitter_color_depth_to_atom)(enum transmitter_color_depth id);
+};
+
+bool dal_bios_parser_init_cmd_tbl_helper(const struct command_table_helper **h,
+ enum dce_version dce);
+
+bool dal_cmd_table_helper_controller_id_to_atom(
+ enum controller_id id,
+ uint8_t *atom_id);
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom(
+ enum signal_type s,
+ bool enable_dp_audio);
+
+void dal_cmd_table_helper_assign_control_parameter(
+ const struct command_table_helper *h,
+ struct bp_encoder_control *control,
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 *ctrl_param);
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom(
+ enum transmitter t);
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom(
+ enum encoder_id id);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
new file mode 100644
index 000000000000..9a4d30dd4969
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "ObjectID.h"
+#include "atomfirmware.h"
+
+#include "include/bios_parser_types.h"
+
+#include "command_table_helper2.h"
+
+bool dal_bios_parser_init_cmd_tbl_helper2(
+ const struct command_table_helper **h,
+ enum dce_version dce)
+{
+ switch (dce) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ *h = dal_cmd_tbl_helper_dce80_get_table();
+ return true;
+
+ case DCE_VERSION_10_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_0:
+ *h = dal_cmd_tbl_helper_dce110_get_table();
+ return true;
+
+ case DCE_VERSION_11_2:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+#endif
+
+ case DCE_VERSION_12_0:
+ *h = dal_cmd_tbl_helper_dce112_get_table2();
+ return true;
+
+ default:
+ /* Unsupported DCE */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
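+
+/* Illustrative sketch (not part of this patch): how a caller might pick the
+ * per-DCE helper table and dispatch through its function pointers;
+ * "example_dp_encoder_mode" is a hypothetical wrapper.
+ */
+static uint32_t example_dp_encoder_mode(enum dce_version dce)
+{
+ const struct command_table_helper *h = NULL;
+
+ if (!dal_bios_parser_init_cmd_tbl_helper2(&h, dce))
+ return ATOM_ENCODER_MODE_CRT; /* unsupported DCE, fall back */
+
+ /* every later conversion goes through the selected function table */
+ return h->encoder_mode_bp_to_atom(SIGNAL_TYPE_DISPLAY_PORT, false);
+}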
+
+/* real implementations */
+
+bool dal_cmd_table_helper_controller_id_to_atom2(
+ enum controller_id id,
+ uint8_t *atom_id)
+{
+ if (atom_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CONTROLLER_ID_D0:
+ *atom_id = ATOM_CRTC1;
+ return true;
+ case CONTROLLER_ID_D1:
+ *atom_id = ATOM_CRTC2;
+ return true;
+ case CONTROLLER_ID_D2:
+ *atom_id = ATOM_CRTC3;
+ return true;
+ case CONTROLLER_ID_D3:
+ *atom_id = ATOM_CRTC4;
+ return true;
+ case CONTROLLER_ID_D4:
+ *atom_id = ATOM_CRTC5;
+ return true;
+ case CONTROLLER_ID_D5:
+ *atom_id = ATOM_CRTC6;
+ return true;
+ /* TODO :case CONTROLLER_ID_UNDERLAY0:
+ *atom_id = ATOM_UNDERLAY_PIPE0;
+ return true;
+ */
+ case CONTROLLER_ID_UNDEFINED:
+ *atom_id = ATOM_CRTC_INVALID;
+ return true;
+ default:
+ /* Wrong controller id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+/**
+ * dal_cmd_table_helper_transmitter_bp_to_atom2
+ *
+ * @brief
+ * Translate the transmitter to the corresponding ATOM BIOS value
+ *
+ * @param t - bios parser transmitter
+ * @return digital transmitter selection:
+ *  =00: Digital Transmitter1 (UNIPHY link A/B)
+ *  =01: Digital Transmitter2 (UNIPHY link C/D)
+ *  =02: Digital Transmitter3 (UNIPHY link E/F)
+ */
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
+ enum transmitter t)
+{
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ case TRANSMITTER_UNIPHY_B:
+ case TRANSMITTER_TRAVIS_LCD:
+ return 0;
+ case TRANSMITTER_UNIPHY_C:
+ case TRANSMITTER_UNIPHY_D:
+ return 1;
+ case TRANSMITTER_UNIPHY_E:
+ case TRANSMITTER_UNIPHY_F:
+ return 2;
+ default:
+ /* Invalid Transmitter Type! */
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+}
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom2(
+ enum signal_type s,
+ bool enable_dp_audio)
+{
+ switch (s) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return ATOM_ENCODER_MODE_DVI;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return ATOM_ENCODER_MODE_HDMI;
+ case SIGNAL_TYPE_LVDS:
+ return ATOM_ENCODER_MODE_LVDS;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_VIRTUAL:
+ if (enable_dp_audio)
+ return ATOM_ENCODER_MODE_DP_AUDIO;
+ else
+ return ATOM_ENCODER_MODE_DP;
+ case SIGNAL_TYPE_RGB:
+ return ATOM_ENCODER_MODE_CRT;
+ default:
+ return ATOM_ENCODER_MODE_CRT;
+ }
+}
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src2(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id)
+{
+ if (ref_clk_src_id == NULL) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL1:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P1PLL;
+ return true;
+ case CLOCK_SOURCE_ID_PLL2:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_P2PLL;
+ return true;
+ /*TODO:case CLOCK_SOURCE_ID_DCPLL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_DCPLL;
+ return true;
+ */
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_EXTCLK;
+ return true;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ *ref_clk_src_id = ENCODER_REFCLK_SRC_INVALID;
+ return true;
+ default:
+ /* Unsupported clock source id */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom2(
+ enum encoder_id id)
+{
+ switch (id) {
+ case ENCODER_ID_INTERNAL_LVDS:
+ return ENCODER_OBJECT_ID_INTERNAL_LVDS;
+ case ENCODER_ID_INTERNAL_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS1;
+ case ENCODER_ID_INTERNAL_TMDS2:
+ return ENCODER_OBJECT_ID_INTERNAL_TMDS2;
+ case ENCODER_ID_INTERNAL_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC1;
+ case ENCODER_ID_INTERNAL_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_DAC2;
+ case ENCODER_ID_INTERNAL_LVTM1:
+ return ENCODER_OBJECT_ID_INTERNAL_LVTM1;
+ case ENCODER_ID_INTERNAL_HDMI:
+ return ENCODER_OBJECT_ID_HDMI_INTERNAL;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return ENCODER_OBJECT_ID_TRAVIS;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ return ENCODER_OBJECT_ID_NUTMEG;
+ case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1;
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2;
+ case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+ return ENCODER_OBJECT_ID_MVPU_FPGA;
+ case ENCODER_ID_INTERNAL_DDI:
+ return ENCODER_OBJECT_ID_INTERNAL_DDI;
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY;
+ case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+ return ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY1;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY2;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ return ENCODER_OBJECT_ID_INTERNAL_UNIPHY3;
+ case ENCODER_ID_INTERNAL_WIRELESS:
+ return ENCODER_OBJECT_ID_INTERNAL_VCE;
+ case ENCODER_ID_INTERNAL_VIRTUAL:
+ return ENCODER_OBJECT_ID_NONE;
+ case ENCODER_ID_UNKNOWN:
+ return ENCODER_OBJECT_ID_NONE;
+ default:
+ /* Invalid encoder id */
+ BREAK_TO_DEBUGGER();
+ return ENCODER_OBJECT_ID_NONE;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
new file mode 100644
index 000000000000..9f587c91d843
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER2_H__
+#define __DAL_COMMAND_TABLE_HELPER2_H__
+
+#include "dce80/command_table_helper_dce80.h"
+#include "dce110/command_table_helper_dce110.h"
+#include "dce112/command_table_helper2_dce112.h"
+
+struct command_table_helper {
+ bool (*controller_id_to_atom)(enum controller_id id, uint8_t *atom_id);
+ uint8_t (*encoder_action_to_atom)(
+ enum bp_encoder_control_action action);
+ uint32_t (*encoder_mode_bp_to_atom)(enum signal_type s,
+ bool enable_dp_audio);
+ bool (*engine_bp_to_atom)(enum engine_id engine_id,
+ uint32_t *atom_engine_id);
+ bool (*clock_source_id_to_atom)(enum clock_source_id id,
+ uint32_t *atom_pll_id);
+ bool (*clock_source_id_to_ref_clk_src)(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+ uint8_t (*transmitter_bp_to_atom)(enum transmitter t);
+ uint8_t (*encoder_id_to_atom)(enum encoder_id id);
+ uint8_t (*clock_source_id_to_atom_phy_clk_src_id)(
+ enum clock_source_id id);
+ uint8_t (*signal_type_to_atom_dig_mode)(enum signal_type s);
+ uint8_t (*hpd_sel_to_atom)(enum hpd_source_id id);
+ uint8_t (*dig_encoder_sel_to_atom)(enum engine_id engine_id);
+ uint8_t (*phy_id_to_atom)(enum transmitter t);
+ uint8_t (*disp_power_gating_action_to_atom)(
+ enum bp_pipe_control_action action);
+ bool (*dc_clock_type_to_atom)(enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type);
+ uint8_t (*transmitter_color_depth_to_atom)(
+ enum transmitter_color_depth id);
+};
+
+bool dal_bios_parser_init_cmd_tbl_helper2(const struct command_table_helper **h,
+ enum dce_version dce);
+
+bool dal_cmd_table_helper_controller_id_to_atom2(
+ enum controller_id id,
+ uint8_t *atom_id);
+
+uint32_t dal_cmd_table_helper_encoder_mode_bp_to_atom2(
+ enum signal_type s,
+ bool enable_dp_audio);
+
+bool dal_cmd_table_helper_clock_source_id_to_ref_clk_src2(
+ enum clock_source_id id,
+ uint32_t *ref_clk_src_id);
+
+uint8_t dal_cmd_table_helper_transmitter_bp_to_atom2(
+ enum transmitter t);
+
+uint8_t dal_cmd_table_helper_encoder_id_to_atom2(
+ enum encoder_id id);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
new file mode 100644
index 000000000000..ca24154468c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
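+
+/* Note: the >> 2 above apparently converts the pre-shifted
+ * ATOM_TRANSMITTER_CONFIG_V5_* ref-clock defines (which, as far as this
+ * usage suggests, occupy bits [3:2] of the config byte) back into a raw
+ * two-bit field value. E.g., assuming ATOM_TRANSMITTER_CONFIG_V5_P2PLL is
+ * (2 << 2), clock_source_id_to_atom_phy_clk_src_id(CLOCK_SOURCE_ID_PLL2)
+ * would return 2.
+ */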
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel >> 4;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+ break;
+ case ENGINE_ID_UNKNOWN:
+ /* No DIG_FRONT is associated with the DIG_BACKEND */
+ atom_dig_encoder_sel = 0;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ }
+
+ return atom_dig_encoder_sel;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ *atom_pll_id = ATOM_PPLL1;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ *atom_pll_id = ATOM_PPLL2;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_EXT_PLL1;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ /* for VCE encoding,
+ * we need to pass in ATOM_PPLL_INVALID
+ */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ /* When programming DP DTO PLL ID should be invalid */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ /* Should not happen */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter = NULL,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce110_get_table
+ *
+ * @brief
+ * Get the DCE11.0 command table helper function table
+ *
+ * @return pointer to the static command_table_helper for DCE11.0
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
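+
+/* Illustrative sketch (not part of this patch): several hooks in the DCE11.0
+ * table above are deliberately NULL (assign_control_parameter,
+ * clock_source_id_to_ref_clk_src, transmitter_bp_to_atom), so callers
+ * presumably have to treat each pointer as optional. "example_ref_clk_src"
+ * is a hypothetical defensive wrapper.
+ */
+static bool example_ref_clk_src(const struct command_table_helper *h,
+ enum clock_source_id id, uint32_t *ref_clk_src)
+{
+ /* not every DCE generation implements every hook */
+ if (h->clock_source_id_to_ref_clk_src == NULL)
+ return false;
+
+ return h->clock_source_id_to_ref_clk_src(id, ref_clk_src);
+}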
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
new file mode 100644
index 000000000000..eb60c2ead992
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE110_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE110_H__
+
+struct command_table_helper;
+
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce110_get_table(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
new file mode 100644
index 000000000000..0237ae575068
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper2.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
+ break;
+ case ENGINE_ID_UNKNOWN:
+ /* No DIG_FRONT is associated with the DIG_BACKEND */
+ atom_dig_encoder_sel = 0;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ }
+
+ return atom_dig_encoder_sel;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_COMBO_PHY_PLL0:
+ *atom_pll_id = ATOM_COMBOPHY_PLL0;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL1:
+ *atom_pll_id = ATOM_COMBOPHY_PLL1;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL2:
+ *atom_pll_id = ATOM_COMBOPHY_PLL2;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL3:
+ *atom_pll_id = ATOM_COMBOPHY_PLL3;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL4:
+ *atom_pll_id = ATOM_COMBOPHY_PLL4;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL5:
+ *atom_pll_id = ATOM_COMBOPHY_PLL5;
+ break;
+ case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_GCK_DFS;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ /* Should not happen */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static bool dc_clock_type_to_atom(
+ enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type)
+{
+ bool retCode = true;
+
+ if (atom_clock_type != NULL) {
+ switch (id) {
+ case DCECLOCK_TYPE_DISPLAY_CLOCK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
+ break;
+
+ case DCECLOCK_TYPE_DPREFCLK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
+ break;
+
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled clock type in driver! */
+ break;
+ }
+ }
+
+ return retCode;
+}
+
+static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
+{
+ uint8_t atomColorDepth = 0;
+
+ switch (id) {
+ case TRANSMITTER_COLOR_DEPTH_24:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_30:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_48:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled color depth in driver! */
+ break;
+ }
+
+ return atomColorDepth;
+}
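+
+/* Note: the deep-color ratio selected above matches the extra bits per pixel
+ * relative to 24 bpp (30/24 = 5/4, 36/24 = 3/2, 48/24 = 2/1). A quick worked
+ * example with an assumed 148500 kHz base pixel clock:
+ *
+ * 30 bpp: 148500 * 5 / 4 = 185625 kHz on the link
+ * 48 bpp: 148500 * 2 / 1 = 297000 kHz on the link
+ */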
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom2,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom2,
+ .encoder_mode_bp_to_atom =
+ dal_cmd_table_helper_encoder_mode_bp_to_atom2,
+ .dc_clock_type_to_atom = dc_clock_type_to_atom,
+ .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce112_get_table2
+ *
+ * @brief
+ * Get the DCE11.2 (atomfirmware) command table helper function table
+ *
+ * @return pointer to the static command_table_helper for DCE11.2
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
new file mode 100644
index 000000000000..abf28a06f5bc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER2_DCE112_H__
+#define __DAL_COMMAND_TABLE_HELPER2_DCE112_H__
+
+struct command_table_helper;
+
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table2(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER2_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
new file mode 100644
index 000000000000..452034f83e4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
@@ -0,0 +1,418 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V6_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_V6_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
+ return atom_hpd_sel;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGG_SEL;
+ break;
+ case ENGINE_ID_UNKNOWN:
+ /* No DIG_FRONT is associated with the DIG_BACKEND */
+ atom_dig_encoder_sel = 0;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V6__DIGA_SEL;
+ break;
+ }
+
+ return atom_dig_encoder_sel;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_COMBO_PHY_PLL0:
+ *atom_pll_id = ATOM_COMBOPHY_PLL0;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL1:
+ *atom_pll_id = ATOM_COMBOPHY_PLL1;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL2:
+ *atom_pll_id = ATOM_COMBOPHY_PLL2;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL3:
+ *atom_pll_id = ATOM_COMBOPHY_PLL3;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL4:
+ *atom_pll_id = ATOM_COMBOPHY_PLL4;
+ break;
+ case CLOCK_SOURCE_COMBO_PHY_PLL5:
+ *atom_pll_id = ATOM_COMBOPHY_PLL5;
+ break;
+ case CLOCK_SOURCE_COMBO_DISPLAY_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_GCK_DFS;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ *atom_pll_id = ATOM_DP_DTO;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ /* Should not happen */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_STREAM_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static bool dc_clock_type_to_atom(
+ enum bp_dce_clock_type id,
+ uint32_t *atom_clock_type)
+{
+ bool retCode = true;
+
+ if (atom_clock_type != NULL) {
+ switch (id) {
+ case DCECLOCK_TYPE_DISPLAY_CLOCK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DISPCLK;
+ break;
+
+ case DCECLOCK_TYPE_DPREFCLK:
+ *atom_clock_type = DCE_CLOCK_TYPE_DPREFCLK;
+ break;
+
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled clock type in driver! */
+ break;
+ }
+ }
+
+ return retCode;
+}
+
+static uint8_t transmitter_color_depth_to_atom(enum transmitter_color_depth id)
+{
+ uint8_t atomColorDepth = 0;
+
+ switch (id) {
+ case TRANSMITTER_COLOR_DEPTH_24:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_DIS;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_30:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_5_4;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_36:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_3_2;
+ break;
+ case TRANSMITTER_COLOR_DEPTH_48:
+ atomColorDepth = PIXEL_CLOCK_V7_DEEPCOLOR_RATIO_2_1;
+ break;
+ default:
+ ASSERT_CRITICAL(false); /* Unhandled color depth in driver! */
+ break;
+ }
+
+ return atomColorDepth;
+}
+
+/* function table */
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter = NULL,
+ .clock_source_id_to_ref_clk_src = NULL,
+ .transmitter_bp_to_atom = NULL,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom = dal_cmd_table_helper_encoder_mode_bp_to_atom,
+ .dc_clock_type_to_atom = dc_clock_type_to_atom,
+ .transmitter_color_depth_to_atom = transmitter_color_depth_to_atom,
+};
+
+/*
+ * dal_cmd_tbl_helper_dce112_get_table
+ *
+ * @brief
+ * Get the DCE11.2 command table helper function table
+ *
+ * @return pointer to the static command_table_helper for DCE11.2
+ */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
new file mode 100644
index 000000000000..dc3660951355
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE112_H__
+
+struct command_table_helper;
+
+/* Initialize command table helper functions */
+const struct command_table_helper *dal_cmd_tbl_helper_dce112_get_table(void);
+
+#endif /* __DAL_COMMAND_TABLE_HELPER_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
new file mode 100644
index 000000000000..8b30b558cf1f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "atom.h"
+
+#include "include/grph_object_id.h"
+#include "include/grph_object_defs.h"
+#include "include/bios_parser_types.h"
+
+#include "../command_table_helper.h"
+
+static uint8_t encoder_action_to_atom(enum bp_encoder_control_action action)
+{
+ uint8_t atom_action = 0;
+
+ switch (action) {
+ case ENCODER_CONTROL_ENABLE:
+ atom_action = ATOM_ENABLE;
+ break;
+ case ENCODER_CONTROL_DISABLE:
+ atom_action = ATOM_DISABLE;
+ break;
+ case ENCODER_CONTROL_SETUP:
+ atom_action = ATOM_ENCODER_CMD_SETUP;
+ break;
+ case ENCODER_CONTROL_INIT:
+ atom_action = ATOM_ENCODER_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_action;
+}
+
+static bool engine_bp_to_atom(enum engine_id id, uint32_t *atom_engine_id)
+{
+ bool result = false;
+
+ if (atom_engine_id != NULL)
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ *atom_engine_id = ASIC_INT_DIG1_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ *atom_engine_id = ASIC_INT_DIG2_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ *atom_engine_id = ASIC_INT_DIG3_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ *atom_engine_id = ASIC_INT_DIG4_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGE:
+ *atom_engine_id = ASIC_INT_DIG5_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGF:
+ *atom_engine_id = ASIC_INT_DIG6_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DIGG:
+ *atom_engine_id = ASIC_INT_DIG7_ENCODER_ID;
+ result = true;
+ break;
+ case ENGINE_ID_DACA:
+ *atom_engine_id = ASIC_INT_DAC1_ENCODER_ID;
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static bool clock_source_id_to_atom(
+ enum clock_source_id id,
+ uint32_t *atom_pll_id)
+{
+ bool result = true;
+
+ if (atom_pll_id != NULL)
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ *atom_pll_id = ATOM_PPLL0;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ *atom_pll_id = ATOM_PPLL1;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ *atom_pll_id = ATOM_PPLL2;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DFS:
+ *atom_pll_id = ATOM_EXT_PLL1;
+ break;
+ case CLOCK_SOURCE_ID_VCE:
+ /* for VCE encoding,
+ * we need to pass in ATOM_PPLL_INVALID
+ */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_DP_DTO:
+ /* When programming DP DTO PLL ID should be invalid */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ break;
+ case CLOCK_SOURCE_ID_UNDEFINED:
+ BREAK_TO_DEBUGGER(); /* check when this will happen! */
+ *atom_pll_id = ATOM_PPLL_INVALID;
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static uint8_t clock_source_id_to_atom_phy_clk_src_id(
+ enum clock_source_id id)
+{
+ uint8_t atom_phy_clk_src_id = 0;
+
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P0PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL1:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ case CLOCK_SOURCE_ID_PLL2:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P2PLL;
+ break;
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT;
+ break;
+ default:
+ atom_phy_clk_src_id = ATOM_TRANSMITTER_CONFIG_V5_P1PLL;
+ break;
+ }
+
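+ /* The ATOM_TRANSMITTER_CONFIG_V5_*PLL defines appear to encode the
+ * ref clock source in bits [3:2] of the config byte, so shift down
+ * to return the raw two-bit field value.
+ */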
+ return atom_phy_clk_src_id >> 2;
+}
+
+static uint8_t signal_type_to_atom_dig_mode(enum signal_type s)
+{
+ uint8_t atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+
+ switch (s) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_LVDS;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_HDMI;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DP_MST;
+ break;
+ default:
+ atom_dig_mode = ATOM_TRANSMITTER_DIGMODE_V5_DVI;
+ break;
+ }
+
+ return atom_dig_mode;
+}
+
+static uint8_t hpd_sel_to_atom(enum hpd_source_id id)
+{
+ uint8_t atom_hpd_sel = 0;
+
+ switch (id) {
+ case HPD_SOURCEID1:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL;
+ break;
+ case HPD_SOURCEID2:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL;
+ break;
+ case HPD_SOURCEID3:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL;
+ break;
+ case HPD_SOURCEID4:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL;
+ break;
+ case HPD_SOURCEID5:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL;
+ break;
+ case HPD_SOURCEID6:
+ atom_hpd_sel = ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL;
+ break;
+ case HPD_SOURCEID_UNKNOWN:
+ default:
+ atom_hpd_sel = 0;
+ break;
+ }
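+ /* The ATOM_TRANSMITTER_CONFIG_V5_HPDx_SEL defines appear to encode
+ * the HPD selection in bits [6:4], so shift down to return the raw
+ * field value.
+ */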
+ return atom_hpd_sel >> 4;
+}
+
+static uint8_t dig_encoder_sel_to_atom(enum engine_id id)
+{
+ uint8_t atom_dig_encoder_sel = 0;
+
+ switch (id) {
+ case ENGINE_ID_DIGA:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ case ENGINE_ID_DIGB:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGB_SEL;
+ break;
+ case ENGINE_ID_DIGC:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGC_SEL;
+ break;
+ case ENGINE_ID_DIGD:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGD_SEL;
+ break;
+ case ENGINE_ID_DIGE:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGE_SEL;
+ break;
+ case ENGINE_ID_DIGF:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGF_SEL;
+ break;
+ case ENGINE_ID_DIGG:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGG_SEL;
+ break;
+ default:
+ atom_dig_encoder_sel = ATOM_TRANMSITTER_V5__DIGA_SEL;
+ break;
+ }
+
+ return atom_dig_encoder_sel;
+}
+
+static uint8_t phy_id_to_atom(enum transmitter t)
+{
+ uint8_t atom_phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYG;
+ break;
+ default:
+ atom_phy_id = ATOM_PHY_ID_UNIPHYA;
+ break;
+ }
+ return atom_phy_id;
+}
+
+static uint8_t disp_power_gating_action_to_atom(
+ enum bp_pipe_control_action action)
+{
+ uint8_t atom_pipe_action = 0;
+
+ switch (action) {
+ case ASIC_PIPE_DISABLE:
+ atom_pipe_action = ATOM_DISABLE;
+ break;
+ case ASIC_PIPE_ENABLE:
+ atom_pipe_action = ATOM_ENABLE;
+ break;
+ case ASIC_PIPE_INIT:
+ atom_pipe_action = ATOM_INIT;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Unhandled action in driver! */
+ break;
+ }
+
+ return atom_pipe_action;
+}
+
+static const struct command_table_helper command_table_helper_funcs = {
+ .controller_id_to_atom = dal_cmd_table_helper_controller_id_to_atom,
+ .encoder_action_to_atom = encoder_action_to_atom,
+ .engine_bp_to_atom = engine_bp_to_atom,
+ .clock_source_id_to_atom = clock_source_id_to_atom,
+ .clock_source_id_to_atom_phy_clk_src_id =
+ clock_source_id_to_atom_phy_clk_src_id,
+ .signal_type_to_atom_dig_mode = signal_type_to_atom_dig_mode,
+ .hpd_sel_to_atom = hpd_sel_to_atom,
+ .dig_encoder_sel_to_atom = dig_encoder_sel_to_atom,
+ .phy_id_to_atom = phy_id_to_atom,
+ .disp_power_gating_action_to_atom = disp_power_gating_action_to_atom,
+ .assign_control_parameter =
+ dal_cmd_table_helper_assign_control_parameter,
+ .clock_source_id_to_ref_clk_src =
+ dal_cmd_table_helper_clock_source_id_to_ref_clk_src,
+ .transmitter_bp_to_atom = dal_cmd_table_helper_transmitter_bp_to_atom,
+ .encoder_id_to_atom = dal_cmd_table_helper_encoder_id_to_atom,
+ .encoder_mode_bp_to_atom =
+ dal_cmd_table_helper_encoder_mode_bp_to_atom,
+};
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void)
+{
+ return &command_table_helper_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
new file mode 100644
index 000000000000..e675c359e306
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+#define __DAL_COMMAND_TABLE_HELPER_DCE80_H__
+
+struct command_table_helper;
+
+const struct command_table_helper *dal_cmd_tbl_helper_dce80_get_table(void);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
new file mode 100644
index 000000000000..41ef35995b02
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the 'calcs' sub-component of DAL.
+# It calculates bandwidth and watermark values for HW programming.
+#
+
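+# Note: the DCN calc objects below use floating-point math, which is why
+# they are built with hard-float, SSE and explicit stack-alignment flags.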
+CFLAGS_dcn_calcs.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dcn_calc_auto.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dcn_calc_math.o := -mhard-float -msse -mpreferred-stack-boundary=4 -Wno-tautological-compare
+
+BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
+
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
+endif
+
+AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
new file mode 100644
index 000000000000..6ca288fb5fb9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "bw_fixed.h"
+
+
+#define MIN_I64 \
+ (int64_t)(-(1LL << 63))
+
+#define MAX_I64 \
+ (int64_t)((1ULL << 63) - 1)
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
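+
+/*
+ * Illustrative note: bw_fixed carries a signed fixed-point value with
+ * BW_FIXED_BITS_PER_FRACTIONAL_PART fraction bits, so an integer n is
+ * stored as n << BW_FIXED_BITS_PER_FRACTIONAL_PART and
+ * GET_FRACTIONAL_PART() masks off everything above the binary point.
+ */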
+
+static uint64_t abs_i64(int64_t arg)
+{
+ if (arg >= 0)
+ return (uint64_t)(arg);
+ else
+ return (uint64_t)(-arg);
+}
+
+struct bw_fixed bw_int_to_fixed_nonconst(int64_t value)
+{
+ struct bw_fixed res;
+ ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32);
+ res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
+ return res;
+}
+
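+/*
+ * Build numerator/denominator as a bw_fixed: the integer part comes from
+ * div64_u64_rem(), the fractional bits are produced one at a time by long
+ * division of the remainder, the LSB is rounded to nearest and the sign is
+ * applied last; e.g. bw_frc_to_fixed(1, 2) yields exactly one half.
+ */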
+struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator)
+{
+ struct bw_fixed res;
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+ uint64_t arg1_value;
+ uint64_t arg2_value;
+ uint64_t remainder;
+
+ /* determine integer part */
+ uint64_t res_value;
+
+ ASSERT(denominator != 0);
+
+ arg1_value = abs_i64(numerator);
+ arg2_value = abs_i64(denominator);
+ res_value = div64_u64_rem(arg1_value, arg2_value, &remainder);
+
+ ASSERT(res_value <= BW_FIXED_MAX_I32);
+
+ /* determine fractional part */
+ {
+ uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART;
+
+ do
+ {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value)
+ {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint64_t summand = (remainder << 1) >= arg2_value;
+
+ ASSERT(res_value <= MAX_I64 - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (int64_t)(res_value);
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+ return res;
+}
+
+struct bw_fixed bw_floor2(
+ const struct bw_fixed arg,
+ const struct bw_fixed significance)
+{
+ struct bw_fixed result;
+ int64_t multiplicand;
+
+ multiplicand = div64_s64(arg.value, abs_i64(significance.value));
+ result.value = abs_i64(significance.value) * multiplicand;
+ ASSERT(abs_i64(result.value) <= abs_i64(arg.value));
+ return result;
+}
+
+struct bw_fixed bw_ceil2(
+ const struct bw_fixed arg,
+ const struct bw_fixed significance)
+{
+ struct bw_fixed result;
+ int64_t multiplicand;
+
+ multiplicand = div64_s64(arg.value, abs_i64(significance.value));
+ result.value = abs_i64(significance.value) * multiplicand;
+ if (abs_i64(result.value) < abs_i64(arg.value)) {
+ if (arg.value < 0)
+ result.value -= abs_i64(significance.value);
+ else
+ result.value += abs_i64(significance.value);
+ }
+ return result;
+}
+
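+/*
+ * Fixed-point multiply: split each operand into integer and fractional
+ * halves, accumulate the four partial products (with overflow asserts),
+ * adjust the frac*frac term via the bw_frc_to_fixed(1, 2) comparison and
+ * apply the sign at the end.
+ */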
+struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ uint64_t arg1_value = abs_i64(arg1.value);
+ uint64_t arg2_value = abs_i64(arg2.value);
+
+ uint64_t arg1_int = BW_FIXED_GET_INTEGER_PART(arg1_value);
+ uint64_t arg2_int = BW_FIXED_GET_INTEGER_PART(arg2_value);
+
+ uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ uint64_t tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ ASSERT(res.value <= BW_FIXED_MAX_I32);
+
+ res.value <<= BW_FIXED_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> BW_FIXED_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value));
+
+ ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+ return res;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
new file mode 100644
index 000000000000..7243c37f569e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "custom_float.h"
+
+
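+/*
+ * Decompose a fixed31_32 value into sign/exponent/mantissa for the custom
+ * float format described by *format, in the spirit of IEEE-754 but with
+ * configurable field widths: the exponent bias is 2^(exponenta_bits - 1) - 1,
+ * values below 1.0 are normalized by left shifts, values at or above the
+ * largest significand by right shifts, and the remaining fraction becomes
+ * the mantissa.
+ */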
+static bool build_custom_float(
+ struct fixed31_32 value,
+ const struct custom_float_format *format,
+ bool *negative,
+ uint32_t *mantissa,
+ uint32_t *exponenta)
+{
+ uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
+
+ const struct fixed31_32 mantissa_constant_plus_max_fraction =
+ dal_fixed31_32_from_fraction(
+ (1LL << (format->mantissa_bits + 1)) - 1,
+ 1LL << format->mantissa_bits);
+
+ struct fixed31_32 mantiss;
+
+ if (dal_fixed31_32_eq(
+ value,
+ dal_fixed31_32_zero)) {
+ *negative = false;
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ if (dal_fixed31_32_lt(
+ value,
+ dal_fixed31_32_zero)) {
+ *negative = format->sign;
+ value = dal_fixed31_32_neg(value);
+ } else {
+ *negative = false;
+ }
+
+ if (dal_fixed31_32_lt(
+ value,
+ dal_fixed31_32_one)) {
+ uint32_t i = 1;
+
+ do {
+ value = dal_fixed31_32_shl(value, 1);
+ ++i;
+ } while (dal_fixed31_32_lt(
+ value,
+ dal_fixed31_32_one));
+
+ --i;
+
+ if (exp_offset <= i) {
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ *exponenta = exp_offset - i;
+ } else if (dal_fixed31_32_le(
+ mantissa_constant_plus_max_fraction,
+ value)) {
+ uint32_t i = 1;
+
+ do {
+ value = dal_fixed31_32_shr(value, 1);
+ ++i;
+ } while (dal_fixed31_32_lt(
+ mantissa_constant_plus_max_fraction,
+ value));
+
+ *exponenta = exp_offset + i - 1;
+ } else {
+ *exponenta = exp_offset;
+ }
+
+ mantiss = dal_fixed31_32_sub(
+ value,
+ dal_fixed31_32_one);
+
+ if (dal_fixed31_32_lt(
+ mantiss,
+ dal_fixed31_32_zero) ||
+ dal_fixed31_32_lt(
+ dal_fixed31_32_one,
+ mantiss))
+ mantiss = dal_fixed31_32_zero;
+ else
+ mantiss = dal_fixed31_32_shl(
+ mantiss,
+ format->mantissa_bits);
+
+ *mantissa = dal_fixed31_32_floor(mantiss);
+
+ return true;
+}
+
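+/*
+ * Pack the sign/exponent/mantissa produced above into a single register
+ * value: mantissa in the low bits, exponent above it and the optional sign
+ * bit on top; the mask checks are only sanity checks on the field widths.
+ */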
+static bool setup_custom_float(
+ const struct custom_float_format *format,
+ bool negative,
+ uint32_t mantissa,
+ uint32_t exponenta,
+ uint32_t *result)
+{
+ uint32_t i = 0;
+ uint32_t j = 0;
+
+ uint32_t value = 0;
+
+ /* verification code:
+ * once the calculation is validated it can be removed
+ */
+
+ const uint32_t mantissa_mask =
+ (1 << (format->mantissa_bits + 1)) - 1;
+
+ const uint32_t exponenta_mask =
+ (1 << (format->exponenta_bits + 1)) - 1;
+
+ if (mantissa & ~mantissa_mask) {
+ BREAK_TO_DEBUGGER();
+ mantissa = mantissa_mask;
+ }
+
+ if (exponenta & ~exponenta_mask) {
+ BREAK_TO_DEBUGGER();
+ exponenta = exponenta_mask;
+ }
+
+ /* end of verification code */
+
+ while (i < format->mantissa_bits) {
+ uint32_t mask = 1 << i;
+
+ if (mantissa & mask)
+ value |= mask;
+
+ ++i;
+ }
+
+ while (j < format->exponenta_bits) {
+ uint32_t mask = 1 << j;
+
+ if (exponenta & mask)
+ value |= mask << i;
+
+ ++j;
+ }
+
+ if (negative && format->sign)
+ value |= 1 << (i + j);
+
+ *result = value;
+
+ return true;
+}
+
+bool convert_to_custom_float_format(
+ struct fixed31_32 value,
+ const struct custom_float_format *format,
+ uint32_t *result)
+{
+ uint32_t mantissa;
+ uint32_t exponenta;
+ bool negative;
+
+ return build_custom_float(
+ value, format, &negative, &mantissa, &exponenta) &&
+ setup_custom_float(
+ format, negative, mantissa, exponenta, result);
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
new file mode 100644
index 000000000000..6347712db834
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -0,0 +1,3257 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+/*******************************************************************************
+ * Private Functions
+ ******************************************************************************/
+
+static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asic_id)
+{
+ switch (asic_id.chip_family) {
+
+ case FAMILY_CZ:
+ if (ASIC_REV_IS_STONEY(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_STONEY;
+ return BW_CALCS_VERSION_CARRIZO;
+
+ case FAMILY_VI:
+ if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS10;
+ if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS11;
+ return BW_CALCS_VERSION_INVALID;
+
+ case FAMILY_AI:
+ return BW_CALCS_VERSION_VEGA10;
+
+ default:
+ return BW_CALCS_VERSION_INVALID;
+ }
+}
+
+static void calculate_bandwidth(
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ struct bw_calcs_data *data)
+{
+ const int32_t pixels_per_chunk = 512;
+ const int32_t high = 2;
+ const int32_t mid = 1;
+ const int32_t low = 0;
+ const uint32_t s_low = 0;
+ const uint32_t s_mid1 = 1;
+ const uint32_t s_mid2 = 2;
+ const uint32_t s_mid3 = 3;
+ const uint32_t s_mid4 = 4;
+ const uint32_t s_mid5 = 5;
+ const uint32_t s_mid6 = 6;
+ const uint32_t s_high = 7;
+ const uint32_t bus_efficiency = 1;
+ const uint32_t dmif_chunk_buff_margin = 1;
+
+ uint32_t max_chunks_fbc_mode;
+ int32_t num_cursor_lines;
+
+ int32_t i, j, k;
+ struct bw_fixed yclk[3];
+ struct bw_fixed sclk[8];
+ bool d0_underlay_enable;
+ bool d1_underlay_enable;
+ bool fbc_enabled;
+ bool lpt_enabled;
+ enum bw_defines sclk_message;
+ enum bw_defines yclk_message;
+ enum bw_defines v_filter_init_mode[maximum_number_of_surfaces];
+ enum bw_defines tiling_mode[maximum_number_of_surfaces];
+ enum bw_defines surface_type[maximum_number_of_surfaces];
+ enum bw_defines voltage;
+ enum bw_defines pipe_check;
+ enum bw_defines hsr_check;
+ enum bw_defines vsr_check;
+ enum bw_defines lb_size_check;
+ enum bw_defines fbc_check;
+ enum bw_defines rotation_check;
+ enum bw_defines mode_check;
+ enum bw_defines nbp_state_change_enable_blank;
+ /*initialize variables*/
+ int32_t number_of_displays_enabled = 0;
+ int32_t number_of_displays_enabled_with_margin = 0;
+ int32_t number_of_aligned_displays_with_no_margin = 0;
+
+ yclk[low] = vbios->low_yclk;
+ yclk[mid] = vbios->mid_yclk;
+ yclk[high] = vbios->high_yclk;
+ sclk[s_low] = vbios->low_sclk;
+ sclk[s_mid1] = vbios->mid1_sclk;
+ sclk[s_mid2] = vbios->mid2_sclk;
+ sclk[s_mid3] = vbios->mid3_sclk;
+ sclk[s_mid4] = vbios->mid4_sclk;
+ sclk[s_mid5] = vbios->mid5_sclk;
+ sclk[s_mid6] = vbios->mid6_sclk;
+ sclk[s_high] = vbios->high_sclk;
+ /*''''''''''''''''''*/
+ /* surface assignment:*/
+ /* 0: d0 underlay or underlay luma*/
+ /* 1: d0 underlay chroma*/
+ /* 2: d1 underlay or underlay luma*/
+ /* 3: d1 underlay chroma*/
+ /* 4: d0 graphics*/
+ /* 5: d1 graphics*/
+ /* 6: d2 graphics*/
+ /* 7: d3 graphics, same mode as d2*/
+ /* 8: d4 graphics, same mode as d2*/
+ /* 9: d5 graphics, same mode as d2*/
+ /* ...*/
+ /* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/
+ /* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/
+ /* underlay luma and chroma surface parameters from spreadsheet*/
+
+ if (data->d0_underlay_mode == bw_def_none) {
+ d0_underlay_enable = 0;
+ }
+ else {
+ d0_underlay_enable = 1;
+ }
+ if (data->d1_underlay_mode == bw_def_none) {
+ d1_underlay_enable = 0;
+ }
+ else {
+ d1_underlay_enable = 1;
+ }
+ data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable;
+ switch (data->underlay_surface_type) {
+ case bw_def_420:
+ surface_type[0] = bw_def_underlay420_luma;
+ surface_type[2] = bw_def_underlay420_luma;
+ data->bytes_per_pixel[0] = 1;
+ data->bytes_per_pixel[2] = 1;
+ surface_type[1] = bw_def_underlay420_chroma;
+ surface_type[3] = bw_def_underlay420_chroma;
+ data->bytes_per_pixel[1] = 2;
+ data->bytes_per_pixel[3] = 2;
+ data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component;
+ data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component;
+ data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component;
+ data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component;
+ break;
+ case bw_def_422:
+ surface_type[0] = bw_def_underlay422;
+ surface_type[2] = bw_def_underlay422;
+ data->bytes_per_pixel[0] = 2;
+ data->bytes_per_pixel[2] = 2;
+ data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component;
+ data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component;
+ break;
+ default:
+ surface_type[0] = bw_def_underlay444;
+ surface_type[2] = bw_def_underlay444;
+ data->bytes_per_pixel[0] = 4;
+ data->bytes_per_pixel[2] = 4;
+ data->lb_size_per_component[0] = dceip->lb_size_per_component444;
+ data->lb_size_per_component[2] = dceip->lb_size_per_component444;
+ break;
+ }
+ if (d0_underlay_enable) {
+ switch (data->underlay_surface_type) {
+ case bw_def_420:
+ data->enable[0] = 1;
+ data->enable[1] = 1;
+ break;
+ default:
+ data->enable[0] = 1;
+ data->enable[1] = 0;
+ break;
+ }
+ }
+ else {
+ data->enable[0] = 0;
+ data->enable[1] = 0;
+ }
+ if (d1_underlay_enable) {
+ switch (data->underlay_surface_type) {
+ case bw_def_420:
+ data->enable[2] = 1;
+ data->enable[3] = 1;
+ break;
+ default:
+ data->enable[2] = 1;
+ data->enable[3] = 0;
+ break;
+ }
+ }
+ else {
+ data->enable[2] = 0;
+ data->enable[3] = 0;
+ }
+ data->use_alpha[0] = 0;
+ data->use_alpha[1] = 0;
+ data->use_alpha[2] = 0;
+ data->use_alpha[3] = 0;
+ data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable;
+ data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable;
+ data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable;
+ data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable;
+ /*underlay0 same as graphics display pipe0*/
+ data->interlace_mode[0] = data->interlace_mode[4];
+ data->interlace_mode[1] = data->interlace_mode[4];
+ /*underlay1 same as graphics display pipe1*/
+ data->interlace_mode[2] = data->interlace_mode[5];
+ data->interlace_mode[3] = data->interlace_mode[5];
+ /*underlay0 same as graphics display pipe0*/
+ data->h_total[0] = data->h_total[4];
+ data->v_total[0] = data->v_total[4];
+ data->h_total[1] = data->h_total[4];
+ data->v_total[1] = data->v_total[4];
+ /*underlay1 same as graphics display pipe1*/
+ data->h_total[2] = data->h_total[5];
+ data->v_total[2] = data->v_total[5];
+ data->h_total[3] = data->h_total[5];
+ data->v_total[3] = data->v_total[5];
+ /*underlay0 same as graphics display pipe0*/
+ data->pixel_rate[0] = data->pixel_rate[4];
+ data->pixel_rate[1] = data->pixel_rate[4];
+ /*underlay1 same as graphics display pipe1*/
+ data->pixel_rate[2] = data->pixel_rate[5];
+ data->pixel_rate[3] = data->pixel_rate[5];
+ if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) {
+ tiling_mode[0] = bw_def_linear;
+ tiling_mode[1] = bw_def_linear;
+ tiling_mode[2] = bw_def_linear;
+ tiling_mode[3] = bw_def_linear;
+ }
+ else {
+ tiling_mode[0] = bw_def_landscape;
+ tiling_mode[1] = bw_def_landscape;
+ tiling_mode[2] = bw_def_landscape;
+ tiling_mode[3] = bw_def_landscape;
+ }
+ data->lb_bpc[0] = data->underlay_lb_bpc;
+ data->lb_bpc[1] = data->underlay_lb_bpc;
+ data->lb_bpc[2] = data->underlay_lb_bpc;
+ data->lb_bpc[3] = data->underlay_lb_bpc;
+ data->compression_rate[0] = bw_int_to_fixed(1);
+ data->compression_rate[1] = bw_int_to_fixed(1);
+ data->compression_rate[2] = bw_int_to_fixed(1);
+ data->compression_rate[3] = bw_int_to_fixed(1);
+ data->access_one_channel_only[0] = 0;
+ data->access_one_channel_only[1] = 0;
+ data->access_one_channel_only[2] = 0;
+ data->access_one_channel_only[3] = 0;
+ data->cursor_width_pixels[0] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[1] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[2] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[3] = bw_int_to_fixed(0);
+ /* graphics surface parameters from spreadsheet*/
+ fbc_enabled = 0;
+ lpt_enabled = 0;
+ for (i = 4; i <= maximum_number_of_surfaces - 3; i++) {
+ if (i < data->number_of_displays + 4) {
+ if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) {
+ data->enable[i] = 0;
+ data->use_alpha[i] = 0;
+ }
+ else if (i == 4 && data->d0_underlay_mode == bw_def_blend) {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 1;
+ }
+ else if (i == 4) {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 0;
+ }
+ else if (i == 5 && data->d1_underlay_mode == bw_def_underlay_only) {
+ data->enable[i] = 0;
+ data->use_alpha[i] = 0;
+ }
+ else if (i == 5 && data->d1_underlay_mode == bw_def_blend) {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 1;
+ }
+ else {
+ data->enable[i] = 1;
+ data->use_alpha[i] = 0;
+ }
+ }
+ else {
+ data->enable[i] = 0;
+ data->use_alpha[i] = 0;
+ }
+ data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable;
+ surface_type[i] = bw_def_graphics;
+ data->lb_size_per_component[i] = dceip->lb_size_per_component444;
+ if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) {
+ tiling_mode[i] = bw_def_linear;
+ }
+ else {
+ tiling_mode[i] = bw_def_tiled;
+ }
+ data->lb_bpc[i] = data->graphics_lb_bpc;
+ if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) {
+ data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate);
+ data->access_one_channel_only[i] = data->lpt_en[i];
+ }
+ else {
+ data->compression_rate[i] = bw_int_to_fixed(1);
+ data->access_one_channel_only[i] = 0;
+ }
+ if (data->fbc_en[i] == 1) {
+ fbc_enabled = 1;
+ if (data->lpt_en[i] == 1) {
+ lpt_enabled = 1;
+ }
+ }
+ data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
+ }
+ /* display_write_back420*/
+ data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0;
+ data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0;
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->enable[maximum_number_of_surfaces - 2] = 1;
+ data->enable[maximum_number_of_surfaces - 1] = 1;
+ }
+ else {
+ data->enable[maximum_number_of_surfaces - 2] = 0;
+ data->enable[maximum_number_of_surfaces - 1] = 0;
+ }
+ surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma;
+ surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma;
+ data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component;
+ data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component;
+ data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1;
+ data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2;
+ data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5];
+ data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5];
+ data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+ data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+ tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear;
+ tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear;
+ data->lb_bpc[maximum_number_of_surfaces - 2] = 8;
+ data->lb_bpc[maximum_number_of_surfaces - 1] = 8;
+ data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0;
+ data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0;
+ /*assume display pipe1 has dwb enabled*/
+ data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5];
+ data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5];
+ data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5];
+ data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5];
+ data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5];
+ data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5];
+ data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5];
+ data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5];
+ data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5];
+ data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5];
+ data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5];
+ data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5];
+ data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1);
+ data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1);
+ data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono;
+ data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono;
+ data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0);
+ data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0);
+ data->use_alpha[maximum_number_of_surfaces - 2] = 0;
+ data->use_alpha[maximum_number_of_surfaces - 1] = 0;
+ /*mode check calculations:*/
+ /* mode within dce ip capabilities*/
+ /* fbc*/
+ /* hsr*/
+ /* vsr*/
+ /* lb size*/
+ /*effective scaling source and ratios:*/
+ /*for graphics, non-stereo, non-interlace surfaces when the size of the source and destination are the same, only one tap is used*/
+ /*420 chroma has half the width, height, horizontal and vertical scaling ratios than luma*/
+ /*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/
+ /*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/
+ /*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/
+ /*in interlace mode there is 2:1 vertical downscaling for each field*/
+ /*in panning or bezel adjustment mode the source width has an extra 128 pixels*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) {
+ data->h_taps[i] = bw_int_to_fixed(1);
+ data->v_taps[i] = bw_int_to_fixed(1);
+ }
+ if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) {
+ data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2));
+ data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2));
+ data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2));
+ data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2));
+ data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2));
+ }
+ else {
+ data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i];
+ data->src_width_after_surface_type = data->src_width[i];
+ data->src_height_after_surface_type = data->src_height[i];
+ data->hsr_after_surface_type = data->h_scale_ratio[i];
+ data->vsr_after_surface_type = data->v_scale_ratio[i];
+ }
+ if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->src_width_after_rotation = data->src_height_after_surface_type;
+ data->src_height_after_rotation = data->src_width_after_surface_type;
+ data->hsr_after_rotation = data->vsr_after_surface_type;
+ data->vsr_after_rotation = data->hsr_after_surface_type;
+ }
+ else {
+ data->src_width_after_rotation = data->src_width_after_surface_type;
+ data->src_height_after_rotation = data->src_height_after_surface_type;
+ data->hsr_after_rotation = data->hsr_after_surface_type;
+ data->vsr_after_rotation = data->vsr_after_surface_type;
+ }
+ switch (data->stereo_mode[i]) {
+ case bw_def_top_bottom:
+ data->source_width_pixels[i] = data->src_width_after_rotation;
+ data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation);
+ data->hsr_after_stereo = data->hsr_after_rotation;
+ data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation);
+ break;
+ case bw_def_side_by_side:
+ data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation);
+ data->source_height_pixels = data->src_height_after_rotation;
+ data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation);
+ data->vsr_after_stereo = data->vsr_after_rotation;
+ break;
+ default:
+ data->source_width_pixels[i] = data->src_width_after_rotation;
+ data->source_height_pixels = data->src_height_after_rotation;
+ data->hsr_after_stereo = data->hsr_after_rotation;
+ data->vsr_after_stereo = data->vsr_after_rotation;
+ break;
+ }
+ data->hsr[i] = data->hsr_after_stereo;
+ if (data->interlace_mode[i]) {
+ data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2));
+ }
+ else {
+ data->vsr[i] = data->vsr_after_stereo;
+ }
+ if (data->panning_and_bezel_adjustment != bw_def_none) {
+ data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256));
+ }
+ else {
+ data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128));
+ }
+ data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels;
+ }
+ }
+ /*mode support checks:*/
+ /*the number of graphics and underlay pipes is limited by the ip support*/
+ /*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/
+ /*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/
+ /*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/
+ /*the number of lines in the line buffer has to exceed the number of vertical taps*/
+ /*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/
+ /*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/
+ /*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 24.011719 / 3, rounded up to a multiple of 48*/
+ /*frame buffer compression is not supported with stereo mode, rotation, or non- 888 formats*/
+ /*rotation is not supported with linear or stereo modes*/
+ if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) {
+ pipe_check = bw_def_ok;
+ }
+ else {
+ pipe_check = bw_def_notok;
+ }
+ hsr_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) {
+ if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) {
+ hsr_check = bw_def_hsr_mtn_4;
+ }
+ else {
+ if (bw_mtn(data->hsr[i], data->h_taps[i])) {
+ hsr_check = bw_def_hsr_mtn_h_taps;
+ }
+ else {
+ if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) {
+ hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr;
+ }
+ }
+ }
+ }
+ }
+ }
+ vsr_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) {
+ if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) {
+ vsr_check = bw_def_vsr_mtn_4;
+ }
+ else {
+ if (bw_mtn(data->vsr[i], data->v_taps[i])) {
+ vsr_check = bw_def_vsr_mtn_v_taps;
+ }
+ }
+ }
+ }
+ }
+ lb_size_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) {
+ data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]);
+ }
+ else {
+ data->source_width_in_lb = data->source_width_pixels[i];
+ }
+ switch (data->lb_bpc[i]) {
+ case 8:
+ data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875ul, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+ break;
+ case 10:
+ data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48));
+ break;
+ default:
+ data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48));
+ break;
+ }
+ data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1));
+ /*clamp the partitions to the maximum number supported by the lb*/
+ if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+ data->lb_partitions_max[i] = bw_int_to_fixed(10);
+ }
+ else {
+ data->lb_partitions_max[i] = bw_int_to_fixed(7);
+ }
+ data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]);
+ if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) {
+ lb_size_check = bw_def_notok;
+ }
+ }
+ }
+ fbc_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) {
+ fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo;
+ }
+ }
+ rotation_check = bw_def_ok;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && (tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) {
+ rotation_check = bw_def_invalid_linear_or_stereo_mode;
+ }
+ }
+ }
+ if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) {
+ mode_check = bw_def_ok;
+ }
+ else {
+ mode_check = bw_def_notok;
+ }
+ /*number of memory channels for write-back client*/
+ data->number_of_dram_wrchannels = vbios->number_of_dram_channels;
+ data->number_of_dram_channels = vbios->number_of_dram_channels;
+ /*modify number of memory channels if lpt mode is enabled*/
+ /* low power tiling mode register*/
+ /* 0 = use channel 0*/
+ /* 1 = use channel 0 and 1*/
+ /* 2 = use channel 0,1,2,3*/
+ if ((fbc_enabled == 1 && lpt_enabled == 1)) {
+ data->dram_efficiency = bw_int_to_fixed(1);
+ if (dceip->low_power_tiling_mode == 0) {
+ data->number_of_dram_channels = 1;
+ }
+ else if (dceip->low_power_tiling_mode == 1) {
+ data->number_of_dram_channels = 2;
+ }
+ else if (dceip->low_power_tiling_mode == 2) {
+ data->number_of_dram_channels = 4;
+ }
+ else {
+ data->number_of_dram_channels = 1;
+ }
+ }
+ else {
+ data->dram_efficiency = bw_frc_to_fixed(8, 10);
+ }
+ /*memory request size and latency hiding:*/
+ /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/
+ /*the display write-back requests are single line*/
+ /*for tiled graphics surfaces, or underlay surfaces with width higher than the maximum size for full efficiency, request size is 32 bytes in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. only half of the bytes in the request are useful in 8 bpp, or in 32 bpp if the rotation is orthogonal to the tiling grain.*/
+ /*for underlay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/
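+ /*worked example of the rules above: a tiled 32 bpp graphics surface with orthogonal rotation*/
+ /*gets 32 byte requests of which 16 bytes are useful, with 2-line interleaving and 2 lines of latency hiding*/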
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) {
+ if ((i < 4)) {
+ /*underlay portrait tiling mode is not supported*/
+ data->orthogonal_rotation[i] = 1;
+ }
+ else {
+ /*graphics portrait tiling mode*/
+ if ((data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling)) {
+ data->orthogonal_rotation[i] = 0;
+ }
+ else {
+ data->orthogonal_rotation[i] = 1;
+ }
+ }
+ }
+ else {
+ if ((i < 4)) {
+ /*only underlay landscape tiling mode is supported*/
+ if ((data->underlay_micro_tile_mode == bw_def_display_micro_tiling)) {
+ data->orthogonal_rotation[i] = 0;
+ }
+ else {
+ data->orthogonal_rotation[i] = 1;
+ }
+ }
+ else {
+ /*graphics landscape tiling mode*/
+ if ((data->graphics_micro_tile_mode == bw_def_display_micro_tiling)) {
+ data->orthogonal_rotation[i] = 0;
+ }
+ else {
+ data->orthogonal_rotation[i] = 1;
+ }
+ }
+ }
+ if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) {
+ data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling;
+ }
+ else {
+ data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling;
+ }
+ if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(1);
+ }
+ else if (tiling_mode[i] == bw_def_linear) {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ }
+ else {
+ if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) {
+ switch (data->bytes_per_pixel[i]) {
+ case 8:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ if (data->orthogonal_rotation[i]) {
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+ }
+ else {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ }
+ break;
+ case 4:
+ if (data->orthogonal_rotation[i]) {
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+ }
+ else {
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ }
+ break;
+ case 2:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(32);
+ break;
+ default:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ data->bytes_per_request[i] = bw_int_to_fixed(32);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(16);
+ break;
+ }
+ }
+ else {
+ data->bytes_per_request[i] = bw_int_to_fixed(64);
+ data->useful_bytes_per_request[i] = bw_int_to_fixed(64);
+ if (data->orthogonal_rotation[i]) {
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+ }
+ else {
+ switch (data->bytes_per_pixel[i]) {
+ case 4:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(2);
+ break;
+ case 2:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+ break;
+ default:
+ data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8);
+ data->latency_hiding_lines[i] = bw_int_to_fixed(4);
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ /*requested peak bandwidth:*/
+ /*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/
+ /*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/
+ /*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/
+ /**/
+ /*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/
+ /*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/
+ /**/
+ /*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/
+ /*rounded up to even and divided by the line times for initialization, which is normally three.*/
+ /*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/
+ /*rounded up to line pairs if not doing line buffer prefetching.*/
+ /**/
+ /*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/
+ /*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/
+ /**/
+ /*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/
+ /*vertical scale ratio and the number of vertical taps increased by one. add one more for possible odd line*/
+ /*panning/bezel adjustment mode.*/
+ /**/
+ /*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/
+ /*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/
+ /*furthermore, there is only one line time for initialization.*/
+ /**/
+ /*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/
+ /*the ceiling of the vertical scale ratio.*/
+ /**/
+ /*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.*/
+ /**/
+ /*the horizontal blank and chunk granularity factor is indirectly used to indicate the interval of time required to transfer the source pixels.*/
+ /*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/
+ /*it applies when the lines in per line out is not 2 or 4. it does not apply when there is a line buffer between the scl and blnd.*/
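+ /*example: with 4 vertical taps, a vertical scale ratio of 2 and no interlacing,*/
+ /*the filter initialization value below is floor((1 + 4 + 2) / 2) = 3*/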
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1));
+ if (data->panning_and_bezel_adjustment == bw_def_any_lines) {
+ data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1));
+ }
+ if (data->stereo_mode[i] == bw_def_top_bottom) {
+ v_filter_init_mode[i] = bw_def_manual;
+ data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4));
+ }
+ else {
+ v_filter_init_mode[i] = bw_def_auto;
+ }
+ if (data->stereo_mode[i] == bw_def_top_bottom) {
+ data->num_lines_at_frame_start = bw_int_to_fixed(1);
+ }
+ else {
+ data->num_lines_at_frame_start = bw_int_to_fixed(3);
+ }
+ if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) {
+ data->line_buffer_prefetch[i] = 0;
+ }
+ else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) {
+ data->line_buffer_prefetch[i] = 1;
+ }
+ else {
+ data->line_buffer_prefetch[i] = 0;
+ }
+ data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start);
+ if (data->line_buffer_prefetch[i] == 1) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]);
+ }
+ else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1);
+ } else if (bw_leq(data->vsr[i],
+ bw_frc_to_fixed(4, 3))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3));
+ } else if (bw_leq(data->vsr[i],
+ bw_frc_to_fixed(6, 4))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4));
+ }
+ else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2);
+ }
+ else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3);
+ }
+ else {
+ data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4);
+ }
+ if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) {
+ data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1);
+ }
+ else {
+ data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2))));
+ }
+ data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]);
+ data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]);
+ }
+ }
+ /*outstanding chunk request limit*/
+ /*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/
+ /*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/
+ /**/
+ /*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/
+ /**/
+ /*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/
+ /*and underlay.*/
+ /**/
+ /*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/
+ /*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/
+ /*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/
+ /*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/
+ /*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/
+ /*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/
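+ /*illustrative worked example (hypothetical values, not taken from any real dceip/vbios table): for a 4 bytes-per-pixel*/
+ /*graphics surface with an assumed 256 pixel chunk width and 2 lines interleaved into the lb, the pipe chunk size is*/
+ /*256 * 2 * 4 = 2048 bytes; an adjusted data buffer size of 18432 bytes then gives an outstanding chunk request limit*/
+ /*of ceiling(18432 / 2048) = 9 chunks.*/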
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) {
+ data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin;
+ }
+ else {
+ data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin;
+ }
+ }
+ if (data->fbc_en[i] == 1) {
+ max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin;
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ switch (surface_type[i]) {
+ case bw_def_display_write_back420_luma:
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size);
+ break;
+ case bw_def_display_write_back420_chroma:
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size);
+ break;
+ case bw_def_underlay420_luma:
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+ break;
+ case bw_def_underlay420_chroma:
+ data->data_buffer_size[i] = bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2));
+ break;
+ case bw_def_underlay422:
+ case bw_def_underlay444:
+ if (data->orthogonal_rotation[i] == 0) {
+ data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size);
+ }
+ else {
+ data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size));
+ }
+ break;
+ default:
+ if (data->fbc_en[i] == 1) {
+ /*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/
+ if (data->number_of_displays == 1) {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+ }
+ else {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+ }
+ }
+ else {
+ /*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/
+ if (data->number_of_displays == 1) {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size)));
+ }
+ else {
+ data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size));
+ }
+ }
+ break;
+ }
+ if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+ data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+ data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024);
+ }
+ else {
+ data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ }
+ }
+ }
+ data->min_dmif_size_in_time = bw_int_to_fixed(9999);
+ data->min_mcifwr_size_in_time = bw_int_to_fixed(9999);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) {
+ data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+ }
+ }
+ else {
+ if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) {
+ data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]);
+ }
+ }
+ }
+ }
+ data->total_requests_for_dmif_size = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i]));
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) {
+ data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i]));
+ }
+ else {
+ data->adjusted_data_buffer_size[i] = data->data_buffer_size[i];
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0)) {
+ /*set maximum chunk limit if only one graphic pipe is enabled*/
+ data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127);
+ }
+ else {
+ data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1));
+ /*clamp maximum chunk limit in the graphic display pipe*/
+ if ((i >= 4)) {
+ data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]);
+ }
+ }
+ }
+ }
+ /*outstanding pte request limit*/
+ /*in tiling mode with no rotation the sg pte requests contain 8 useful ptes, the sg row height is the page height and the sg page width x height is 64x64 for 8 bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/
+ /*in tiling mode with rotation the sg pte requests contain only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/
+ /*in linear mode the pte requests contain 8 useful ptes, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/
+ /*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/
+ /*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/
+ /*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/
+ /*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/
+ /*the pte requests per chunk are 256 divided by the scatter-gather page width and by the useful ptes per pte request*/
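+ /*illustrative worked example (hypothetical values): for a tiled, unrotated 4 bytes-per-pixel surface the scatter-gather*/
+ /*page is 32x32 and each pte request holds 8 useful ptes, so with an assumed 256 pixel chunk width the pte requests per*/
+ /*chunk are 256 / 32 / 8 = 1; a source width rounded up to chunks of 2048 pixels then gives ceiling(2048 / 256 * 1) = 8*/
+ /*pte requests per row, and with an assumed 2 pte request rows that is 16 pte requests in the vblank.*/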
+ if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) {
+ data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
+ }
+ else {
+ data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+ if (tiling_mode[i] == bw_def_linear) {
+ data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+ data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(1);
+ data->scatter_gather_pte_request_rows = bw_int_to_fixed(1);
+ data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode);
+ }
+ else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) {
+ data->useful_pte_per_pte_request = bw_int_to_fixed(8);
+ switch (data->bytes_per_pixel[i]) {
+ case 4:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+ break;
+ case 2:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+ break;
+ default:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+ break;
+ }
+ data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+ data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+ }
+ else {
+ data->useful_pte_per_pte_request = bw_int_to_fixed(1);
+ switch (data->bytes_per_pixel[i]) {
+ case 4:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(32);
+ break;
+ case 2:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(32);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+ break;
+ default:
+ data->scatter_gather_page_width[i] = bw_int_to_fixed(64);
+ data->scatter_gather_page_height[i] = bw_int_to_fixed(64);
+ break;
+ }
+ data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode);
+ data->scatter_gather_row_height = data->scatter_gather_page_height[i];
+ }
+ data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request);
+ data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]);
+ data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]);
+ if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) {
+ data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank;
+ }
+ else {
+ data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1))));
+ }
+ }
+ }
+ /*pitch padding recommended for efficiency in linear mode*/
+ /*in linear mode graphics or underlay with scatter gather, a pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/
+ /*if that is the case it is recommended to pad the pitch by at least 256 pixels*/
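+ /*illustrative worked example (hypothetical values): with 8 dram banks and 2 dram channels the inefficient pitch is*/
+ /*256 * 8 * 2 = 4096 bytes, i.e. 1024 pixels at 4 bytes per pixel; a linear surface whose pitch is a multiple of that*/
+ /*should be padded by at least 256 pixels.*/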
+ data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels));
+
+ /*pixel transfer time*/
+ /*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipe's data buffer size in memory in the time for data transfer*/
+ /*for dmif, pte and cursor requests have to be included.*/
+ /*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/
+ /*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/
+ /*the page close-open time is determined by trc and the number of page close-opens*/
+ /*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/
+ /*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/
+ /*otherwise, the bytes per page close-open is the chunk size because that is the arbitration slice.*/
+ /*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/
+ /*cursor requests outstanding are limited to a group of two source lines. each group costs a page close-open time for dmif reads*/
+ /*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/
+ /*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/
+ /*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/
+ /*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/
+ /*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the first pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/
+ /*the source pixels for the first output pixel is 512 if the scaler vertical filter initialization value is greater than 2, and it is 4 times the source width if it is greater than 4.*/
+ /*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/
+ /*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/
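+ /*illustrative worked example of the data burst time (hypothetical values): with an assumed dram efficiency of 0.8,*/
+ /*an 800 mhz yclk, a 64 bit dram channel and 2 channels, the dram bandwidth is 0.8 * 800e6 * (64 / 8) * 2 = 10.24 gbytes/s,*/
+ /*so 40960 bytes of required dram access data contribute 40960 / 10.24e9 = 4 us to the dmif burst time, which is then*/
+ /*compared against the total page close-open time and the sclk data bus transfer time, taking the maximum of the three.*/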
+ data->cursor_total_data = bw_int_to_fixed(0);
+ data->cursor_total_request_groups = bw_int_to_fixed(0);
+ data->scatter_gather_total_pte_requests = bw_int_to_fixed(0);
+ data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4)));
+ if (dceip->large_cursor == 1) {
+ data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1)));
+ }
+ else {
+ data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1)));
+ }
+ if (data->scatter_gather_enable_for_pipe[i]) {
+ data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]);
+ data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1)));
+ }
+ }
+ }
+ data->tile_width_in_pixels = bw_int_to_fixed(8);
+ data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+ data->mcifwr_total_number_of_data_request_page_close_open = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) {
+ data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i])));
+ }
+ else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) {
+ data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice;
+ }
+ else {
+ data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i];
+ }
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+ }
+ else {
+ data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open));
+ }
+ }
+ }
+ data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000));
+ data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ }
+ }
+ data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i]));
+ }
+ }
+ }
+ data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+ data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
+ data->total_display_reads_required_data = bw_int_to_fixed(0);
+ data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
+ data->total_display_writes_required_data = bw_int_to_fixed(0);
+ data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i];
+ /*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width. each*/
+ /*pseudo-channel may be read independently of one another.*/
+ /*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/
+ /*the 64 or 32 byte sized data is stored in one pseudo-channel.*/
+ /*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/
+ /*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/
+ /*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/
+ /*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/
+ /*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/
+ /*the memory efficiency will be 50% for the 32 byte sized data.*/
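+ /*illustrative worked example (hypothetical values): for a gddr5 channel that is 64 bits wide the burst of 8 transfers*/
+ /*moves 8 * 64 / 8 = 64 bytes, so 32 byte requests are scaled by ceiling(64 / 32) = 2 (50% efficiency) while 64 byte*/
+ /*requests are scaled by 1; for hbm the dram access data is left unchanged.*/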
+ if (vbios->memory_type == bw_def_hbm) {
+ data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i];
+ }
+ else {
+ data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1)));
+ }
+ data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data);
+ data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data);
+ }
+ else {
+ data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]);
+ data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1))));
+ }
+ }
+ }
+ data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+ data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64)));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) {
+ data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]);
+ }
+ else {
+ if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) {
+ data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512);
+ }
+ else {
+ data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0);
+ }
+ }
+ data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i])));
+ data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]);
+ }
+ }
+ for (i = 0; i <= 2; i++) {
+ for (j = 0; j <= 7; j++) {
+ data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_int_to_fixed(bus_efficiency)))));
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ for (j = 0; j <= 2; j++) {
+ for (k = 0; k <= 7; k++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ /*time to transfer data from the dmif buffer to the lb. since the mc to dmif transfer time overlaps*/
+ /*with the dmif to lb transfer time, only time to transfer the last chunk is considered.*/
+ data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor)))));
+ data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i]));
+ /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
+ /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
+ /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+ /*immediately serviced without a gap in the urgent requests.*/
+ /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
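+ /*illustrative note (hypothetical values): with the assumed 256 requests per memory channel and, say, 2 channels and*/
+ /*64 byte requests, the gmc/arb could be holding up to 256 * 2 * 64 = 32768 bytes of read data across the mclk switch.*/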
+ if (surface_type[i] == bw_def_graphics) {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+ break;
+ }
+ if (data->use_alpha[i] == 1) {
+ data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+ }
+ }
+ else {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = bw_int_to_fixed(3);
+ break;
+ }
+ }
+ if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+ data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+ }
+ else {
+ data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+ }
+ data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))));
+ }
+ else {
+ data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]));
+ /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/
+ /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/
+ /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/
+ /*immediately serviced without a gap in the urgent requests.*/
+ /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/
+ data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))));
+ }
+ }
+ }
+ }
+ }
+ /*cpu c-state and p-state change enable*/
+ /*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/
+ /*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/
+ /*condition for the blackout duration:*/
+ /* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/
+ /*condition for the blackout recovery:*/
+ /* recovery time > dmif burst time + 2 * urgent latency*/
+ /* recovery time > (display bw * blackout duration + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/
+ /* / (dispclk - display bw)*/
+ /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/
+ /*the minimum latency hiding is further limited by the cursor. the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/
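+ /*illustrative worked example (hypothetical values): with a minimum latency hiding of 40 us, a blackout duration of*/
+ /*8 us, a dmif burst time of 4 us and a line source transfer time of 10 us, the blackout duration margin is*/
+ /*40 - 8 - 4 - 10 = 18 us, so the duration condition is met at that yclk/sclk point and only the recovery condition*/
+ /*and the dispclk limit remain to be checked.*/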
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) {
+ if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) {
+ data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+ }
+ else {
+ data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]);
+ }
+ }
+ else {
+ data->cursor_latency_hiding[i] = bw_int_to_fixed(9999);
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) {
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ }
+ else {
+ data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency);
+ }
+ data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]);
+ }
+ }
+ for (i = 0; i <= 2; i++) {
+ for (j = 0; j <= 7; j++) {
+ data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999);
+ data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0);
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0);
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+ data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k]))));
+ if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+ }
+ else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+ }
+ }
+ else {
+ data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j]));
+ data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+ if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999);
+ }
+ else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) {
+ data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k]));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) {
+ data->cpup_state_change_enable = bw_def_yes;
+ if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) {
+ data->cpuc_state_change_enable = bw_def_yes;
+ }
+ else {
+ data->cpuc_state_change_enable = bw_def_no;
+ }
+ }
+ else {
+ data->cpup_state_change_enable = bw_def_no;
+ data->cpuc_state_change_enable = bw_def_no;
+ }
+ /*nb p-state change enable*/
+ /*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/
+ /*below the maximum.*/
+ /*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/
+ /*minus the dmif burst time, minus the source line transfer time*/
+ /*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency*/
+ /*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/
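+ /*illustrative worked example (hypothetical values): with a maximum latency hiding with cursor of 45 us, an nbp state*/
+ /*change latency of 18 us, a dmif burst time of 4 us and a dram speed change line source transfer time of 12 us, the*/
+ /*dram speed change margin is 45 - 18 - 4 - 12 = 11 us, so this surface has margin at that yclk/sclk point provided*/
+ /*the dispclk required for the change stays below the maximum.*/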
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) {
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ }
+ else {
+ /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) * h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/
+ data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], bw_mul(bw_frc_to_fixed(8, 10), data->total_dmifmc_urgent_latency));
+ }
+ data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]);
+ }
+ }
+ /*initialize variables*/
+ number_of_displays_enabled = 0;
+ number_of_displays_enabled_with_margin = 0;
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ number_of_displays_enabled = number_of_displays_enabled + 1;
+ }
+ data->display_pstate_change_enable[k] = 0;
+ }
+ for (i = 0; i <= 2; i++) {
+ for (j = 0; j <= 7; j++) {
+ data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999);
+ data->dram_speed_change_margin = bw_int_to_fixed(9999);
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0);
+ data->num_displays_with_margin[i][j] = 0;
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+ if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+ /*determine the minimum dram clock change margin for each set of clock frequencies*/
+ data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->display_pstate_change_enable[k] = 1;
+ data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ }
+ }
+ }
+ else {
+ data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]);
+ if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) {
+ /*determine the minimum dram clock change margin for each display pipe*/
+ data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin);
+ /*compute the maximum clock frequency required for the dram clock change at each set of clock frequencies*/
+ data->dispclk_required_for_dram_speed_change[i][j] = bw_max3(data->dispclk_required_for_dram_speed_change[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k]))));
+ if ((bw_ltn(data->dispclk_required_for_dram_speed_change[i][j], vbios->high_voltage_max_dispclk))) {
+ data->display_pstate_change_enable[k] = 1;
+ data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ /*determine the number of displays with margin to switch in the v_active region*/
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if ((data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1)) {
+ number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 1;
+ }
+ }
+ /*determine the number of displays that don't have any dram clock change margin, but*/
+ /*have the same resolution. these displays can switch in a common vblank region if*/
+ /*their frames are aligned.*/
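+ /*illustrative example (hypothetical configuration): two identical unscaled 1920x1080 displays at the same pixel rate*/
+ /*count toward the aligned group, while a third display at a different pixel rate does not, even if it also lacks margin.*/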
+ data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999);
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k]) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+ data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+ }
+ else {
+ data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]);
+ data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]);
+ }
+ }
+ }
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ data->displays_with_same_mode[i] = bw_int_to_fixed(0);
+ if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) {
+ for (j = 0; j <= maximum_number_of_surfaces - 1; j++) {
+ if ((data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
+ data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1));
+ }
+ }
+ }
+ }
+ /*compute the maximum number of aligned displays with no margin*/
+ number_of_aligned_displays_with_no_margin = 0;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i]));
+ }
+ /*dram clock change is possible, if all displays have positive margin except for one display or a group of*/
+ /*aligned displays with the same timing.*/
+ /*the display(s) with the negative margin can be switched in the v_blank region while the other*/
+ /*displays are in v_blank or v_active.*/
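+ /*illustrative example (hypothetical configuration): with 3 displays enabled, 2 having positive v_active margin and 1*/
+ /*display that has no v_active margin but does have positive v_blank margin and no same-mode partner, the count is*/
+ /*2 + 1 = 3 = number of displays enabled, so the dram clock change is allowed provided the high yclk / high sclk margin*/
+ /*and dispclk checks below also pass.*/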
+ if ((number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk))) {
+ data->nbp_state_change_enable = bw_def_yes;
+ }
+ else {
+ data->nbp_state_change_enable = bw_def_no;
+ }
+ /*dram clock change is possible only in vblank if all displays are aligned and have no margin*/
+ if ((number_of_aligned_displays_with_no_margin == number_of_displays_enabled)) {
+ nbp_state_change_enable_blank = bw_def_yes;
+ }
+ else {
+ nbp_state_change_enable_blank = bw_def_no;
+ }
+ /*required yclk(pclk)*/
+ /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queue size*/
+ /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/
+ /*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/
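+ /*illustrative worked example (hypothetical values): if the display reads require 40960 bytes of dram access data and*/
+ /*the reads time for data transfer is 16 us, the required dram bandwidth is 40960 / 16e-6 = 2.56 gbytes/s; with an*/
+ /*assumed 0.8 dram efficiency, a 64 bit channel and 2 channels, a 400 mhz low yclk already provides*/
+ /*0.8 * 400e6 * (64 / 8) * 2 = 5.12 gbytes/s, so the low yclk would be chosen if the blackout and p-state checks also pass.*/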
+ data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999);
+ /* number of cursor lines stored in the cursor data return buffer*/
+ num_cursor_lines = 0;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) {
+ /*compute number of cursor lines stored in data return buffer*/
+ if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) {
+ num_cursor_lines = 4;
+ }
+ else {
+ num_cursor_lines = 2;
+ }
+ data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i]));
+ }
+ }
+ }
+ /*compute minimum time to read one chunk from the dmif buffer*/
+ if ((number_of_displays_enabled > 2)) {
+ data->chunk_request_delay = 0;
+ }
+ else {
+ data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk));
+ }
+ data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time);
+ data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay));
+ data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency);
+ data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer);
+ data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer);
+ data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips);
+ data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time);
+ if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+ data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+ yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+ data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999);
+ yclk_message = bw_def_exceeded_allowed_page_close_open;
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else {
+ data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000));
+ if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == number_of_displays_enabled_with_margin))) {
+ yclk_message = bw_fixed_to_int(vbios->low_yclk);
+ data->y_clk_level = low;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) {
+ yclk_message = bw_fixed_to_int(vbios->mid_yclk);
+ data->y_clk_level = mid;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else if (bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) {
+ yclk_message = bw_fixed_to_int(vbios->high_yclk);
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ else {
+ yclk_message = bw_def_exceeded_allowed_maximum_bw;
+ data->y_clk_level = high;
+ data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels));
+ }
+ }
+ /*required sclk*/
+ /*sclk requirement only makes sense if the total pte requests fit in the scatter-gather saw queue size*/
+ /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in forsaking dram speed/nb p-state change.*/
+ /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipes' data buffer size through the sclk bus in the time for data transfer*/
+ /*for dmif, pte and cursor requests have to be included.*/
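+ /*illustrative example (hypothetical numbers, not taken from any asic): with a 32-byte data return bus and a bus efficiency of 1,*/
+ /*a workload that must read 262144 bytes within 100us of transfer time needs (262144 / 100) / (32 * 1) = 81.92mhz of sclk,*/
+ /*i.e. required_sclk = (data / transfer_time) / (bus_width * bus_efficiency), matching the expressions below.*/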
+ data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) {
+ data->required_sclk = bw_int_to_fixed(9999);
+ sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size;
+ data->sclk_level = s_high;
+ }
+ else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) {
+ data->required_sclk = bw_int_to_fixed(9999);
+ sclk_message = bw_def_exceeded_allowed_page_close_open;
+ data->sclk_level = s_high;
+ }
+ else {
+ data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk);
+ if (bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_low;
+ data->sclk_level = s_low;
+ data->required_sclk = vbios->low_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid1;
+ data->required_sclk = vbios->mid1_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid2;
+ data->required_sclk = vbios->mid2_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid3;
+ data->required_sclk = vbios->mid3_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid4;
+ data->required_sclk = vbios->mid4_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid5;
+ data->required_sclk = vbios->mid5_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) {
+ sclk_message = bw_def_mid;
+ data->sclk_level = s_mid6;
+ data->required_sclk = vbios->mid6_sclk;
+ }
+ else if (bw_ltn(data->required_sclk, sclk[s_high])) {
+ sclk_message = bw_def_high;
+ data->sclk_level = s_high;
+ data->required_sclk = vbios->high_sclk;
+ }
+ else {
+ sclk_message = bw_def_exceeded_allowed_maximum_sclk;
+ data->sclk_level = s_high;
+ /*required_sclk = high_sclk*/
+ }
+ }
+ /*dispclk*/
+ /*if dispclk is set to the maximum, ramping is not required. dispclk required without ramping is less than the dispclk required with ramping.*/
+ /*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/
+ /*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/
+ /*if that does not happen either, dispclk required is the dispclk required with ramping.*/
+ /*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request throughput and for dram/nb p-state change if enabled.*/
+ /*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/
+ /*the horizontal blank and chunk granularity factor is the ratio of the line time divided by the line time minus half the horizontal blank and chunk time. it applies when the lines in per line out is not 2 or 4.*/
+ /*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/
+ /*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/
+ /*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/
+ /*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/
+ /*the scaling limits factor itself it also clamped to at least 1*/
+ /*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/
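+ /*illustrative example (hypothetical numbers): with the pre-downscaler disabled, an 8-bit graphics surface with h_taps = 4, v_taps = 4,*/
+ /*v_scaler_efficiency ~= 3.43 and hsr = 1.5 gives scaler_limits_factor = max(1, ceil(4 / 4), 1.5 * max(4 / 3.43, 1)) ~= 1.75;*/
+ /*with a 148.5mhz pixel rate and a 0.5% downspread the scaler term alone then asks for roughly 1.005 * 148.5 * 1.75 ~= 261mhz of dispclk before ramping is considered.*/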
+ data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100)));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] == bw_def_graphics) {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component;
+ break;
+ }
+ if (data->use_alpha[i] == 1) {
+ data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency);
+ }
+ }
+ else {
+ switch (data->lb_bpc[i]) {
+ case 6:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component;
+ break;
+ case 8:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component;
+ break;
+ case 10:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component;
+ break;
+ default:
+ data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component;
+ break;
+ }
+ }
+ if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
+ data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i]));
+ }
+ else {
+ data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
+ }
+ data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk);
+ data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput)));
+ data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput));
+ }
+ }
+ data->total_dispclk_required_with_ramping = bw_int_to_fixed(0);
+ data->total_dispclk_required_without_ramping = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) {
+ data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i];
+ }
+ if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) {
+ data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i];
+ }
+ }
+ }
+ data->total_read_request_bandwidth = bw_int_to_fixed(0);
+ data->total_write_request_bandwidth = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]);
+ }
+ else {
+ data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]);
+ }
+ }
+ }
+ data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency);
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth);
+ if (data->cpuc_state_change_enable == bw_def_yes) {
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]);
+ }
+ if (data->cpup_state_change_enable == bw_def_yes) {
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]);
+ }
+ if (data->nbp_state_change_enable == bw_def_yes) {
+ data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+ data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]);
+ }
+ if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+ data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth;
+ }
+ else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) {
+ data->dispclk = vbios->high_voltage_max_dispclk;
+ }
+ else {
+ data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth;
+ }
+ /* required core voltage*/
+ /* the core voltage required is low if sclk, yclk (pclk) and dispclk are within the low limits*/
+ /* otherwise, the core voltage required is medium if yclk (pclk) is within the low limit and sclk and dispclk are within the medium limit*/
+ /* otherwise, the core voltage required is high if the three clocks are within the high limits*/
+ /* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/
+ if (pipe_check == bw_def_notok) {
+ voltage = bw_def_na;
+ }
+ else if (mode_check == bw_def_notok) {
+ voltage = bw_def_notok;
+ }
+ else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) {
+ voltage = bw_def_0_72;
+ }
+ else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) {
+ voltage = bw_def_0_8;
+ }
+ else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) {
+ if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) {
+ voltage = bw_def_high_no_nbp_state_change;
+ }
+ else {
+ voltage = bw_def_0_9;
+ }
+ }
+ else {
+ voltage = bw_def_notok;
+ }
+ if (voltage == bw_def_0_72) {
+ data->max_phyclk = vbios->low_voltage_max_phyclk;
+ }
+ else if (voltage == bw_def_0_8) {
+ data->max_phyclk = vbios->mid_voltage_max_phyclk;
+ }
+ else {
+ data->max_phyclk = vbios->high_voltage_max_phyclk;
+ }
+ /*required blackout recovery time*/
+ data->blackout_recovery_time = bw_int_to_fixed(0);
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) {
+ if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]));
+ if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+ }
+ }
+ else {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]));
+ if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) {
+ data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k])))));
+ }
+ }
+ }
+ }
+ /*sclk deep sleep*/
+ /*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but should not be set to less than the request bandwidth.*/
+ /*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the parallel rotation underlay,*/
+ /*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/
+ /*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8 bpp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/
+ /*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/
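+ /*illustrative example (hypothetical numbers): a 32bpp graphics surface has a 64 / 4 = 16 pixel data fifo entry, so if that is the*/
+ /*minimum entry size across the enabled surfaces and dispclk is 600mhz, the deep sleep sclk floor is roughly 600 * 1.15 / 16 ~= 43.1mhz,*/
+ /*unless the total read request bandwidth computed above is larger, in which case that bandwidth is used instead.*/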
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) {
+ data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+ }
+ else if (surface_type[i] == bw_def_graphics) {
+ data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ }
+ else if (data->orthogonal_rotation[i] == 0) {
+ data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16);
+ }
+ else {
+ data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i]));
+ }
+ }
+ }
+ data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) {
+ data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i];
+ }
+ }
+ }
+ data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth);
+ /*urgent, stutter and nb-p_state watermark*/
+ /*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/
+ /*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel. it does not apply to the writeback.*/
+ /*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/
+ /*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/
+ /*blackout_duration is added to the urgent watermark*/
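+ /*illustrative example (hypothetical numbers): with 4us of urgent latency, a 10us dmif burst time, a 12us pixel/line source transfer*/
+ /*time, no blackout, a 2us chunk request time and a 1us cursor request time, the urgent watermark comes out to*/
+ /*4 + 10 + 12 + 0 + 2 + 1 = 29us, mirroring the per-surface sums computed below.*/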
+ data->chunk_request_time = bw_int_to_fixed(0);
+ data->cursor_request_time = bw_int_to_fixed(0);
+ /*compute total time to request one chunk from each active display pipe*/
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2))))));
+ }
+ }
+ /*compute total time to request cursor data*/
+ data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level]))));
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i]));
+ if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) {
+ data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+ data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+ data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]);
+ /*unconditionally remove blackout time from the nb p-state watermark*/
+ if ((data->display_pstate_change_enable[i] == 1)) {
+ data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+ }
+ else {
+ /*maximize the watermark to force the switch in the v_blank region of the frame*/
+ data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+ }
+ }
+ else {
+ data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time);
+ data->stutter_exit_watermark[i] = bw_int_to_fixed(0);
+ data->stutter_entry_watermark[i] = bw_int_to_fixed(0);
+ if ((data->display_pstate_change_enable[i] == 1)) {
+ data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level]));
+ }
+ else {
+ /*maximize the watermark to force the switch in the v_blank region of the frame*/
+ data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000);
+ }
+ }
+ }
+ }
+ /*stutter mode enable*/
+ /*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the*/
+ /*display pipe.*/
+ data->stutter_mode_enable = data->cpuc_state_change_enable;
+ if (data->number_of_displays > 1) {
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) {
+ data->stutter_mode_enable = bw_def_no;
+ }
+ }
+ }
+ }
+ /*performance metrics*/
+ /* display read access efficiency (%)*/
+ /* display write back access efficiency (%)*/
+ /* stutter efficiency (%)*/
+ /* extra underlay pitch recommended for efficiency (pixels)*/
+ /* immediate flip time (us)*/
+ /* latency for other clients due to urgent display read (us)*/
+ /* latency for other clients due to urgent display write (us)*/
+ /* average bandwidth consumed by display (no compression) (gb/s)*/
+ /* required dram bandwidth (gb/s)*/
+ /* required sclk (m_hz)*/
+ /* required rd urgent latency (us)*/
+ /* nb p-state change margin (us)*/
+ /*dmif and mcifwr dram access efficiency*/
+ /*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time. but it cannot exceed the dram efficiency provided by the memory subsystem*/
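+ /*illustrative example (hypothetical numbers): if the reads need 100kbytes of dram access data and the dram bandwidth is 20gbytes/s,*/
+ /*the ideal access time is about 5us; with a 10us total page close-open time the dmif dram access efficiency is min(5 / 10, 1) = 0.5.*/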
+ data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1));
+ if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) {
+ data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1));
+ }
+ else {
+ data->mcifwrdram_access_efficiency = bw_int_to_fixed(0);
+ }
+ /*average bandwidth*/
+ /*the average bandwidth with no compression is, over the vertical active time, the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and by the ratio of bytes per request to useful bytes per request.*/
+ /*the average bandwidth with compression is the same, divided by the compression ratio*/
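+ /*illustrative example (hypothetical numbers): a 1920-wide source at 4 bytes per pixel with a line time of h_total / pixel_rate = 2200 / 148.5 ~= 14.8us,*/
+ /*a vertical scale ratio of 1 and equal bytes per request and useful bytes per request averages 1920 * 4 / 14.8 ~= 519mbytes/s before compression;*/
+ /*a 4:1 compression rate reduces this to roughly 130mbytes/s.*/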
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]);
+ data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]);
+ }
+ }
+ data->total_average_bandwidth_no_compression = bw_int_to_fixed(0);
+ data->total_average_bandwidth = bw_int_to_fixed(0);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]);
+ data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]);
+ }
+ }
+ /*stutter efficiency*/
+ /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. only applies if the display write-back is not enabled.*/
+ /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/
+ /*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/
+ /*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/
+ /*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/
+ /*compute the time to read all the data from the dmif buffer to the lb (dram refresh period)*/
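+ /*illustrative example (hypothetical numbers): with a 5us self refresh exit latency, a 20us stutter burst time and 100 stutter bursts*/
+ /*per 60hz frame (16666.7us), stutter efficiency ~= max(0, 1 - (5 + 20) * 100 / 16666.7) * 100 ~= 85%.*/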
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i]))));
+ data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]);
+ }
+ }
+ data->min_stutter_refresh_duration = bw_int_to_fixed(9999);
+ data->total_stutter_dmif_buffer_size = 0;
+ data->total_bytes_requested = 0;
+ data->min_stutter_dmif_buffer_size = 9999;
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) {
+ data->min_stutter_refresh_duration = data->stutter_refresh_duration[i];
+ data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i])))));
+ data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]);
+ }
+ data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size)));
+ }
+ }
+ data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_min2(bw_mul(data->dram_bandwidth, data->dmifdram_access_efficiency), bw_mul(sclk[data->sclk_level], bw_int_to_fixed(32))));
+ data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size;
+ data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time);
+ data->time_in_self_refresh = data->min_stutter_refresh_duration;
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->stutter_efficiency = bw_int_to_fixed(0);
+ }
+ else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) {
+ data->stutter_efficiency = bw_int_to_fixed(0);
+ }
+ else {
+ /*compute stutter efficiency assuming 60 hz refresh rate*/
+ data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100)));
+ }
+ /*immediate flip time*/
+ /*if scatter gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/
+ /*otherwise, it may take just one urgent memory trip*/
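+ /*illustrative example (hypothetical numbers): a scatter-gather surface with 384 pte requests in a row and a pte request limit of 128*/
+ /*needs ceil(384 / 128) = 3 urgent trips, so with a 4us total dmifmc urgent latency the immediate flip time is about 3 * 4 = 12us;*/
+ /*without scatter-gather a single trip is assumed.*/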
+ data->worst_number_of_trips_to_memory = bw_int_to_fixed(1);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) {
+ data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1));
+ if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) {
+ data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i];
+ }
+ }
+ }
+ data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency);
+ /*worst latency for other clients*/
+ /*it is the urgent latency plus the urgent burst time*/
+ data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]);
+ if (data->d1_display_write_back_dwb_enable == 1) {
+ data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time);
+ }
+ else {
+ data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0);
+ }
+ /*dmif mc urgent latency supported in high sclk and yclk*/
+ data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips);
+ /*dram speed/p-state change margin*/
+ /*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/
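+ /*illustrative example (hypothetical numbers): a pipe that can hide 60us of latency with the cursor, carries a 40us nb p-state change*/
+ /*watermark and sees a 45us nbp state change latency supports up to 60 - 40 + 45 = 65us of dram speed/p-state change latency;*/
+ /*the minimum of this value across all enabled pipes is reported.*/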
+ data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+ data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999);
+ for (i = 0; i <= maximum_number_of_surfaces - 1; i++) {
+ if (data->enable[i]) {
+ data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+ data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency));
+ }
+ }
+ /*sclk required vs urgent latency*/
+ for (i = 1; i <= 5; i++) {
+ data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i)));
+ if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) {
+ data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_int_to_fixed(bus_efficiency))));
+ }
+ else {
+ data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na);
+ }
+ }
+ /*output link bit per pixel supported*/
+ for (k = 0; k <= maximum_number_of_surfaces - 1; k++) {
+ data->output_bpphdmi[k] = bw_def_na;
+ data->output_bppdp4_lane_hbr[k] = bw_def_na;
+ data->output_bppdp4_lane_hbr2[k] = bw_def_na;
+ data->output_bppdp4_lane_hbr3[k] = bw_def_na;
+ if (data->enable[k]) {
+ data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24)));
+ if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) {
+ data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+ }
+ if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) {
+ data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+ }
+ if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) {
+ data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8)));
+ }
+ }
+ }
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void bw_calcs_init(struct bw_calcs_dceip *bw_dceip,
+ struct bw_calcs_vbios *bw_vbios,
+ struct hw_asic_id asic_id)
+{
+ struct bw_calcs_dceip dceip = { 0 };
+ struct bw_calcs_vbios vbios = { 0 };
+
+ enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id);
+
+ dceip.version = version;
+
+ switch (version) {
+ case BW_CALCS_VERSION_CARRIZO:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 64;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(1600);
+ vbios.mid_yclk = bw_int_to_fixed(1600);
+ vbios.low_yclk = bw_frc_to_fixed(66666, 100);
+ vbios.low_sclk = bw_int_to_fixed(200);
+ vbios.mid1_sclk = bw_int_to_fixed(300);
+ vbios.mid2_sclk = bw_int_to_fixed(300);
+ vbios.mid3_sclk = bw_int_to_fixed(300);
+ vbios.mid4_sclk = bw_int_to_fixed(300);
+ vbios.mid5_sclk = bw_int_to_fixed(300);
+ vbios.mid6_sclk = bw_int_to_fixed(300);
+ vbios.high_sclk = bw_frc_to_fixed(62609, 100);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(50);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_frc_to_fixed(19649, 1000);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 3;
+ dceip.number_of_underlay_pipes = 1;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = false;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 2;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(0);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/
+ break;
+ case BW_CALCS_VERSION_POLARIS10:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 32;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(6000);
+ vbios.mid_yclk = bw_int_to_fixed(3200);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(300);
+ vbios.mid1_sclk = bw_int_to_fixed(400);
+ vbios.mid2_sclk = bw_int_to_fixed(500);
+ vbios.mid3_sclk = bw_int_to_fixed(600);
+ vbios.mid4_sclk = bw_int_to_fixed(700);
+ vbios.mid5_sclk = bw_int_to_fixed(800);
+ vbios.mid6_sclk = bw_int_to_fixed(974);
+ vbios.high_sclk = bw_int_to_fixed(1154);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 6;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ case BW_CALCS_VERSION_POLARIS11:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 32;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(6000);
+ vbios.mid_yclk = bw_int_to_fixed(3200);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(300);
+ vbios.mid1_sclk = bw_int_to_fixed(400);
+ vbios.mid2_sclk = bw_int_to_fixed(500);
+ vbios.mid3_sclk = bw_int_to_fixed(600);
+ vbios.mid4_sclk = bw_int_to_fixed(700);
+ vbios.mid5_sclk = bw_int_to_fixed(800);
+ vbios.mid6_sclk = bw_int_to_fixed(974);
+ vbios.high_sclk = bw_int_to_fixed(1154);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(459);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+ if (vbios.number_of_dram_channels == 2) // 64-bit
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ else
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(45);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 5;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ case BW_CALCS_VERSION_STONEY:
+ vbios.memory_type = bw_def_gddr5;
+ vbios.dram_channel_width_in_bits = 64;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 8;
+ vbios.high_yclk = bw_int_to_fixed(1866);
+ vbios.mid_yclk = bw_int_to_fixed(1866);
+ vbios.low_yclk = bw_int_to_fixed(1333);
+ vbios.low_sclk = bw_int_to_fixed(200);
+ vbios.mid1_sclk = bw_int_to_fixed(600);
+ vbios.mid2_sclk = bw_int_to_fixed(600);
+ vbios.mid3_sclk = bw_int_to_fixed(600);
+ vbios.mid4_sclk = bw_int_to_fixed(600);
+ vbios.mid5_sclk = bw_int_to_fixed(600);
+ vbios.mid6_sclk = bw_int_to_fixed(600);
+ vbios.high_sclk = bw_int_to_fixed(800);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(352);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(467);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(643);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(50);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(4);
+ vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10);
+ vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0);
+ vbios.nbp_state_change_latency = bw_frc_to_fixed(2008, 100);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = true;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256;
+ vbios.blackout_duration = bw_int_to_fixed(18); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(20);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(768);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = false;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 2;
+ dceip.number_of_underlay_pipes = 1;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = false;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 2;
+ dceip.graphics_dmif_size = 12288;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = true;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(82176);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = false;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(0);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ case BW_CALCS_VERSION_VEGA10:
+ vbios.memory_type = bw_def_hbm;
+ vbios.dram_channel_width_in_bits = 128;
+ vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits;
+ vbios.number_of_dram_banks = 16;
+ vbios.high_yclk = bw_int_to_fixed(2400);
+ vbios.mid_yclk = bw_int_to_fixed(1700);
+ vbios.low_yclk = bw_int_to_fixed(1000);
+ vbios.low_sclk = bw_int_to_fixed(300);
+ vbios.mid1_sclk = bw_int_to_fixed(350);
+ vbios.mid2_sclk = bw_int_to_fixed(400);
+ vbios.mid3_sclk = bw_int_to_fixed(500);
+ vbios.mid4_sclk = bw_int_to_fixed(600);
+ vbios.mid5_sclk = bw_int_to_fixed(700);
+ vbios.mid6_sclk = bw_int_to_fixed(760);
+ vbios.high_sclk = bw_int_to_fixed(776);
+ vbios.low_voltage_max_dispclk = bw_int_to_fixed(460);
+ vbios.mid_voltage_max_dispclk = bw_int_to_fixed(670);
+ vbios.high_voltage_max_dispclk = bw_int_to_fixed(1133);
+ vbios.low_voltage_max_phyclk = bw_int_to_fixed(540);
+ vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.high_voltage_max_phyclk = bw_int_to_fixed(810);
+ vbios.data_return_bus_width = bw_int_to_fixed(32);
+ vbios.trc = bw_int_to_fixed(48);
+ vbios.dmifmc_urgent_latency = bw_int_to_fixed(3);
+ vbios.stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10);
+ vbios.stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10);
+ vbios.nbp_state_change_latency = bw_int_to_fixed(39);
+ vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10);
+ vbios.scatter_gather_enable = false;
+ vbios.down_spread_percentage = bw_frc_to_fixed(5, 10);
+ vbios.cursor_width = 32;
+ vbios.average_compression_rate = 4;
+ vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8;
+ vbios.blackout_duration = bw_int_to_fixed(0); /* us */
+ vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0);
+
+ dceip.large_cursor = false;
+ dceip.dmif_request_buffer_size = bw_int_to_fixed(2304);
+ dceip.dmif_pipe_en_fbc_chunk_tracker = true;
+ dceip.cursor_max_outstanding_group_num = 1;
+ dceip.lines_interleaved_into_lb = 2;
+ dceip.chunk_width = 256;
+ dceip.number_of_graphics_pipes = 6;
+ dceip.number_of_underlay_pipes = 0;
+ dceip.low_power_tiling_mode = 0;
+ dceip.display_write_back_supported = true;
+ dceip.argb_compression_support = true;
+ dceip.underlay_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35556, 10000);
+ dceip.underlay_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.underlay_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.underlay_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.graphics_vscaler_efficiency6_bit_per_component =
+ bw_frc_to_fixed(35, 10);
+ dceip.graphics_vscaler_efficiency8_bit_per_component =
+ bw_frc_to_fixed(34286, 10000);
+ dceip.graphics_vscaler_efficiency10_bit_per_component =
+ bw_frc_to_fixed(32, 10);
+ dceip.graphics_vscaler_efficiency12_bit_per_component =
+ bw_int_to_fixed(3);
+ dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3);
+ dceip.max_dmif_buffer_allocated = 4;
+ dceip.graphics_dmif_size = 24576;
+ dceip.underlay_luma_dmif_size = 19456;
+ dceip.underlay_chroma_dmif_size = 23552;
+ dceip.pre_downscaler_enabled = true;
+ dceip.underlay_downscale_prefetch_enabled = false;
+ dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1);
+ dceip.lb_size_per_component444 = bw_int_to_fixed(245952);
+ dceip.graphics_lb_nodownscaling_multi_line_prefetching = true;
+ dceip.stutter_and_dram_clock_state_change_gated_before_cursor =
+ bw_int_to_fixed(1);
+ dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.underlay420_chroma_lb_size_per_component =
+ bw_int_to_fixed(164352);
+ dceip.underlay422_lb_size_per_component = bw_int_to_fixed(
+ 82176);
+ dceip.cursor_chunk_width = bw_int_to_fixed(64);
+ dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4);
+ dceip.underlay_maximum_width_efficient_for_tiling =
+ bw_int_to_fixed(1920);
+ dceip.underlay_maximum_height_efficient_for_tiling =
+ bw_int_to_fixed(1080);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display =
+ bw_frc_to_fixed(3, 10);
+ dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation =
+ bw_int_to_fixed(25);
+ dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed(
+ 2);
+ dceip.maximum_total_outstanding_pte_requests_allowed_by_saw =
+ bw_int_to_fixed(128);
+ dceip.limit_excessive_outstanding_dmif_requests = true;
+ dceip.linear_mode_line_request_alternation_slice =
+ bw_int_to_fixed(64);
+ dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode =
+ 32;
+ dceip.display_write_back420_luma_mcifwr_buffer_size = 12288;
+ dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192;
+ dceip.request_efficiency = bw_frc_to_fixed(8, 10);
+ dceip.dispclk_per_request = bw_int_to_fixed(2);
+ dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100);
+ dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100);
+ dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2;
+ dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0);
+ break;
+ default:
+ break;
+ }
+ *bw_dceip = dceip;
+ *bw_vbios = vbios;
+
+}
+
+/*
+ * Compare calculated (required) clocks against the clocks available at
+ * maximum voltage (max Performance Level).
+ */
+static bool is_display_configuration_supported(
+ const struct bw_calcs_vbios *vbios,
+ const struct dce_bw_output *calcs_output)
+{
+ uint32_t int_max_clk;
+
+ int_max_clk = bw_fixed_to_int(vbios->high_voltage_max_dispclk);
+ int_max_clk *= 1000; /* MHz to kHz */
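+	/*
+	 * Illustrative example (values taken from the vbios tables above):
+	 * Polaris11 lists high_voltage_max_dispclk = 1108 MHz, so the limit
+	 * here becomes 1108000 kHz and any calculated dispclk_khz above that
+	 * makes the configuration unsupported.
+	 */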
+ if (calcs_output->dispclk_khz > int_max_clk)
+ return false;
+
+ int_max_clk = bw_fixed_to_int(vbios->high_sclk);
+ int_max_clk *= 1000; /* MHz to kHz */
+ if (calcs_output->sclk_khz > int_max_clk)
+ return false;
+
+ return true;
+}
+
+static void populate_initial_data(
+ const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data)
+{
+ int i, j;
+ int num_displays = 0;
+
+ data->underlay_surface_type = bw_def_420;
+ data->panning_and_bezel_adjustment = bw_def_none;
+ data->graphics_lb_bpc = 10;
+ data->underlay_lb_bpc = 8;
+ data->underlay_tiling_mode = bw_def_tiled;
+ data->graphics_tiling_mode = bw_def_tiled;
+ data->underlay_micro_tile_mode = bw_def_display_micro_tiling;
+ data->graphics_micro_tile_mode = bw_def_display_micro_tiling;
+
+ /* Pipes with underlay first */
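+	/*
+	 * Indexing note (inferred from the assignments below): surfaces driven
+	 * by a display pipe are stored at entries num_displays + 4, i.e.
+	 * starting at index 4, while the underlay data of the first two
+	 * displays goes into entries 0..3 (num_displays * 2 + j).
+	 */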
+ for (i = 0; i < pipe_count; i++) {
+ if (!pipe[i].stream || !pipe[i].bottom_pipe)
+ continue;
+
+ ASSERT(pipe[i].plane_state);
+
+ if (num_displays == 0) {
+ if (!pipe[i].plane_state->visible)
+ data->d0_underlay_mode = bw_def_underlay_only;
+ else
+ data->d0_underlay_mode = bw_def_blend;
+ } else {
+ if (!pipe[i].plane_state->visible)
+ data->d1_underlay_mode = bw_def_underlay_only;
+ else
+ data->d1_underlay_mode = bw_def_blend;
+ }
+
+ data->fbc_en[num_displays + 4] = false;
+ data->lpt_en[num_displays + 4] = false;
+ data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
+ data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
+ data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000);
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
+ data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
+ data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
+ data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
+ data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
+ switch (pipe[i].plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ break;
+ case ROTATION_ANGLE_90:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+ break;
+ case ROTATION_ANGLE_180:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+ break;
+ case ROTATION_ANGLE_270:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+ break;
+ default:
+ break;
+ }
+ switch (pipe[i].plane_state->format) {
+		case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+		case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ data->bytes_per_pixel[num_displays + 4] = 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ data->bytes_per_pixel[num_displays + 4] = 8;
+ break;
+ default:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ }
+ data->interlace_mode[num_displays + 4] = false;
+ data->stereo_mode[num_displays + 4] = bw_def_mono;
+
+
+ for (j = 0; j < 2; j++) {
+ data->fbc_en[num_displays * 2 + j] = false;
+ data->lpt_en[num_displays * 2 + j] = false;
+
+ data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height);
+ data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed(
+ pipe[i].bottom_pipe->plane_state->plane_size.grph.surface_pitch);
+ data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps);
+ data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps);
+ data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+ pipe[i].bottom_pipe->plane_res.scl_data.ratios.horz.value);
+ data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed(
+ pipe[i].bottom_pipe->plane_res.scl_data.ratios.vert.value);
+ switch (pipe[i].bottom_pipe->plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0);
+ break;
+ case ROTATION_ANGLE_90:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90);
+ break;
+ case ROTATION_ANGLE_180:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180);
+ break;
+ case ROTATION_ANGLE_270:
+ data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270);
+ break;
+ default:
+ break;
+ }
+ data->stereo_mode[num_displays * 2 + j] = bw_def_mono;
+ }
+
+ num_displays++;
+ }
+
+ /* Pipes without underlay after */
+ for (i = 0; i < pipe_count; i++) {
+ if (!pipe[i].stream || pipe[i].bottom_pipe)
+ continue;
+
+
+ data->fbc_en[num_displays + 4] = false;
+ data->lpt_en[num_displays + 4] = false;
+ data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total);
+ data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total);
+ data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_khz, 1000);
+ if (pipe[i].plane_state) {
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height);
+ data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps);
+ data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps);
+ data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value);
+ data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value);
+ switch (pipe[i].plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ break;
+ case ROTATION_ANGLE_90:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90);
+ break;
+ case ROTATION_ANGLE_180:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180);
+ break;
+ case ROTATION_ANGLE_270:
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270);
+ break;
+ default:
+ break;
+ }
+ switch (pipe[i].plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ data->bytes_per_pixel[num_displays + 4] = 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ data->bytes_per_pixel[num_displays + 4] = 8;
+ break;
+ default:
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ break;
+ }
+ } else {
+ data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
+ data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
+ data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_addressable);
+ data->h_taps[num_displays + 4] = bw_int_to_fixed(1);
+ data->v_taps[num_displays + 4] = bw_int_to_fixed(1);
+ data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+ data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
+ data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
+ data->bytes_per_pixel[num_displays + 4] = 4;
+ }
+
+ data->interlace_mode[num_displays + 4] = false;
+ data->stereo_mode[num_displays + 4] = bw_def_mono;
+ num_displays++;
+ }
+
+ data->number_of_displays = num_displays;
+}
+
+/*
+ * Return:
+ * true  - the display configuration is supported; in that case 'calcs_output'
+ *         contains the data needed for HW programming.
+ * false - the display configuration is not supported (not enough bandwidth).
+ */
+bool bw_calcs(struct dc_context *ctx,
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ const struct pipe_ctx pipe[],
+ int pipe_count,
+ struct dce_bw_output *calcs_output)
+{
+ struct bw_calcs_data *data = kzalloc(sizeof(struct bw_calcs_data),
+ GFP_KERNEL);
+ if (!data)
+ return false;
+
+ populate_initial_data(pipe, pipe_count, data);
+
+	/*TODO: this should be taken out of calcs output and assigned during timing sync for pplib use*/
+ calcs_output->all_displays_in_sync = false;
+
+ if (data->number_of_displays != 0) {
+ uint8_t yclk_lvl, sclk_lvl;
+ struct bw_fixed high_sclk = vbios->high_sclk;
+ struct bw_fixed mid1_sclk = vbios->mid1_sclk;
+ struct bw_fixed mid2_sclk = vbios->mid2_sclk;
+ struct bw_fixed mid3_sclk = vbios->mid3_sclk;
+ struct bw_fixed mid4_sclk = vbios->mid4_sclk;
+ struct bw_fixed mid5_sclk = vbios->mid5_sclk;
+ struct bw_fixed mid6_sclk = vbios->mid6_sclk;
+ struct bw_fixed low_sclk = vbios->low_sclk;
+ struct bw_fixed high_yclk = vbios->high_yclk;
+ struct bw_fixed mid_yclk = vbios->mid_yclk;
+ struct bw_fixed low_yclk = vbios->low_yclk;
+
+ calculate_bandwidth(dceip, vbios, data);
+
+ yclk_lvl = data->y_clk_level;
+ sclk_lvl = data->sclk_level;
+
+ calcs_output->nbp_state_change_enable =
+ data->nbp_state_change_enable;
+ calcs_output->cpuc_state_change_enable =
+ data->cpuc_state_change_enable;
+ calcs_output->cpup_state_change_enable =
+ data->cpup_state_change_enable;
+ calcs_output->stutter_mode_enable =
+ data->stutter_mode_enable;
+ calcs_output->dispclk_khz =
+ bw_fixed_to_int(bw_mul(data->dispclk,
+ bw_int_to_fixed(1000)));
+ calcs_output->blackout_recovery_time_us =
+ bw_fixed_to_int(data->blackout_recovery_time);
+ calcs_output->sclk_khz =
+ bw_fixed_to_int(bw_mul(data->required_sclk,
+ bw_int_to_fixed(1000)));
+ calcs_output->sclk_deep_sleep_khz =
+ bw_fixed_to_int(bw_mul(data->sclk_deep_sleep,
+ bw_int_to_fixed(1000)));
+ if (yclk_lvl == 0)
+ calcs_output->yclk_khz = bw_fixed_to_int(
+ bw_mul(low_yclk, bw_int_to_fixed(1000)));
+ else if (yclk_lvl == 1)
+ calcs_output->yclk_khz = bw_fixed_to_int(
+ bw_mul(mid_yclk, bw_int_to_fixed(1000)));
+ else
+ calcs_output->yclk_khz = bw_fixed_to_int(
+ bw_mul(high_yclk, bw_int_to_fixed(1000)));
+
+		/* units: nanoseconds, 16-bit storage. */
+
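+		/*
+		 * Watermark source indexing (inferred from the code below):
+		 * data->*_watermark[4..6] and [9] feed wm_ns[0..2] and [5];
+		 * wm_ns[3..4] come from [0..1] when slave (underlay) planes
+		 * are supported, otherwise from [7..8].
+		 */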
+ calcs_output->nbp_state_change_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->stutter_exit_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->urgent_wm_ns[0].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].a_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+
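+		/*
+		 * The b/c/d watermark sets below are produced by temporarily
+		 * overriding the (nominally const) vbios clock table, rerunning
+		 * calculate_bandwidth() at the altered clock levels, and then
+		 * restoring the saved values afterwards.
+		 */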
+ if (dceip->version != BW_CALCS_VERSION_CARRIZO) {
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+ calculate_bandwidth(dceip, vbios, data);
+
+ calcs_output->nbp_state_change_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+				nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->stutter_exit_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->urgent_wm_ns[0].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+ calculate_bandwidth(dceip, vbios, data);
+
+ calcs_output->nbp_state_change_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+
+ calcs_output->stutter_exit_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->urgent_wm_ns[0].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].c_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+ }
+
+ if (dceip->version == BW_CALCS_VERSION_CARRIZO) {
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk;
+ ((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk;
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk;
+ } else {
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk;
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk;
+ }
+
+ calculate_bandwidth(dceip, vbios, data);
+
+ calcs_output->nbp_state_change_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->nbp_state_change_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->nbp_state_change_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->nbp_state_change_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->nbp_state_change_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ nbp_state_change_watermark[9], bw_int_to_fixed(1000)));
+
+ calcs_output->stutter_exit_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_exit_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_exit_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_exit_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_exit_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_exit_watermark[9], bw_int_to_fixed(1000)));
+
+
+ calcs_output->urgent_wm_ns[0].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[1].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[2].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->urgent_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->urgent_wm_ns[3].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->urgent_wm_ns[4].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->urgent_wm_ns[5].d_mark =
+ bw_fixed_to_int(bw_mul(data->
+ urgent_watermark[9], bw_int_to_fixed(1000)));
+
+ ((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk;
+ ((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk;
+ ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk;
+ ((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk;
+ ((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk;
+ } else {
+ calcs_output->nbp_state_change_enable = true;
+ calcs_output->cpuc_state_change_enable = true;
+ calcs_output->cpup_state_change_enable = true;
+ calcs_output->stutter_mode_enable = true;
+ calcs_output->dispclk_khz = 0;
+ calcs_output->sclk_khz = 0;
+ }
+
+ kfree(data);
+
+ return is_display_configuration_supported(vbios, calcs_output);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
new file mode 100644
index 000000000000..626f9cf8aad2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c
@@ -0,0 +1,1899 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn_calc_auto.h"
+#include "dcn_calc_math.h"
+
+/*REVISION#250*/
+void scaler_settings_calculation(struct dcn_bw_internal_vars *v)
+{
+ int k;
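+	/*
+	 * Scale ratios: e.g. a 3840 pixel wide viewport scaled into a 1920
+	 * pixel wide recout gives h_ratio = 2.0 (illustrative numbers, not
+	 * from a real timing). The vertical ratio is doubled for interlaced
+	 * output, and both ratios are multiplied by the underscan factor when
+	 * underscan is enabled.
+	 */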
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->allow_different_hratio_vratio == dcn_bw_yes) {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->h_ratio[k] = v->viewport_width[k] / v->scaler_rec_out_width[k];
+ v->v_ratio[k] = v->viewport_height[k] / v->scaler_recout_height[k];
+ }
+ else {
+ v->h_ratio[k] = v->viewport_height[k] / v->scaler_rec_out_width[k];
+ v->v_ratio[k] = v->viewport_width[k] / v->scaler_recout_height[k];
+ }
+ }
+ else {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->h_ratio[k] =dcn_bw_max2(v->viewport_width[k] / v->scaler_rec_out_width[k], v->viewport_height[k] / v->scaler_recout_height[k]);
+ }
+ else {
+ v->h_ratio[k] =dcn_bw_max2(v->viewport_height[k] / v->scaler_rec_out_width[k], v->viewport_width[k] / v->scaler_recout_height[k]);
+ }
+ v->v_ratio[k] = v->h_ratio[k];
+ }
+ if (v->interlace_output[k] == 1.0) {
+ v->v_ratio[k] = 2.0 * v->v_ratio[k];
+ }
+ if ((v->underscan_output[k] == 1.0)) {
+ v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor;
+ v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor;
+ }
+ }
+ /*scaler taps calculation*/
+
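+	/*
+	 * Tap selection (as computed below): downscaling (ratio > 1) uses up
+	 * to 2 * ceil(ratio) taps capped at max_hscl_taps/max_vscl_taps,
+	 * upscaling uses 4 taps and a 1:1 ratio needs only 1 tap, unless the
+	 * caller overrides the tap counts explicitly.
+	 */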
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > 1.0) {
+ v->acceptable_quality_hta_ps =dcn_bw_min2(v->max_hscl_taps, 2.0 *dcn_bw_ceil2(v->h_ratio[k], 1.0));
+ }
+ else if (v->h_ratio[k] < 1.0) {
+ v->acceptable_quality_hta_ps = 4.0;
+ }
+ else {
+ v->acceptable_quality_hta_ps = 1.0;
+ }
+ if (v->ta_pscalculation == dcn_bw_override) {
+ v->htaps[k] = v->override_hta_ps[k];
+ }
+ else {
+ v->htaps[k] = v->acceptable_quality_hta_ps;
+ }
+ if (v->v_ratio[k] > 1.0) {
+ v->acceptable_quality_vta_ps =dcn_bw_min2(v->max_vscl_taps, 2.0 *dcn_bw_ceil2(v->v_ratio[k], 1.0));
+ }
+ else if (v->v_ratio[k] < 1.0) {
+ v->acceptable_quality_vta_ps = 4.0;
+ }
+ else {
+ v->acceptable_quality_vta_ps = 1.0;
+ }
+ if (v->ta_pscalculation == dcn_bw_override) {
+ v->vtaps[k] = v->override_vta_ps[k];
+ }
+ else {
+ v->vtaps[k] = v->acceptable_quality_vta_ps;
+ }
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->vta_pschroma[k] = 0.0;
+ v->hta_pschroma[k] = 0.0;
+ }
+ else {
+ if (v->ta_pscalculation == dcn_bw_override) {
+ v->vta_pschroma[k] = v->override_vta_pschroma[k];
+ v->hta_pschroma[k] = v->override_hta_pschroma[k];
+ }
+ else {
+ v->vta_pschroma[k] = v->acceptable_quality_vta_ps;
+ v->hta_pschroma[k] = v->acceptable_quality_hta_ps;
+ }
+ }
+ }
+}
+
+void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v)
+{
+ int i;
+ int j;
+ int k;
+ /*mode support, voltage state and soc configuration*/
+
+ /*scale ratio support check*/
+
+ v->scale_ratio_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > v->max_hscl_ratio || v->v_ratio[k] > v->max_vscl_ratio || v->h_ratio[k] > v->htaps[k] || v->v_ratio[k] > v->vtaps[k] || (v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16 && (v->h_ratio[k] / 2.0 > v->hta_pschroma[k] || v->v_ratio[k] / 2.0 > v->vta_pschroma[k]))) {
+ v->scale_ratio_support = dcn_bw_no;
+ }
+ }
+ /*source format, pixel format and scan support check*/
+
+ v->source_format_pixel_and_scan_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_surface_mode[k] == dcn_bw_sw_linear && v->source_scan[k] != dcn_bw_hor) || ((v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_var_d || v->source_surface_mode[k] == dcn_bw_sw_var_d_x) && v->source_pixel_format[k] != dcn_bw_rgb_sub_64)) {
+ v->source_format_pixel_and_scan_support = dcn_bw_no;
+ }
+ }
+ /*bandwidth support check*/
+
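+	/*
+	 * Per-plane read bandwidth is roughly swath_width * bytes_per_pixel *
+	 * v_ratio / line_time, with small extra factors added below for DCC
+	 * metadata and PTE requests; the totals are later compared against
+	 * the per-state return bandwidth.
+	 */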
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->swath_width_ysingle_dpp[k] = v->viewport_width[k];
+ }
+ else {
+ v->swath_width_ysingle_dpp[k] = v->viewport_height[k];
+ }
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pixel_in_dety[k] = 8.0;
+ v->byte_per_pixel_in_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pixel_in_dety[k] = 4.0;
+ v->byte_per_pixel_in_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pixel_in_dety[k] = 2.0;
+ v->byte_per_pixel_in_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pixel_in_dety[k] = 1.0;
+ v->byte_per_pixel_in_detc[k] = 2.0;
+ }
+ else {
+ v->byte_per_pixel_in_dety[k] = 4.0f / 3.0f;
+ v->byte_per_pixel_in_detc[k] = 8.0f / 3.0f;
+ }
+ }
+ v->total_read_bandwidth_consumed_gbyte_per_second = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->read_bandwidth[k] = v->swath_width_ysingle_dpp[k] * (dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) * v->v_ratio[k] +dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0 * v->v_ratio[k] / 2) / (v->htotal[k] / v->pixel_clock[k]);
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1.0 / 256);
+ }
+ if (v->pte_enable == dcn_bw_yes && v->source_scan[k] != dcn_bw_hor && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x)) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1.0 / 64);
+ }
+ else if (v->pte_enable == dcn_bw_yes && v->source_scan[k] == dcn_bw_hor && (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32) && (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x)) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1.0 / 256);
+ }
+ else if (v->pte_enable == dcn_bw_yes) {
+			v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1.0 / 512);
+ }
+ v->total_read_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->read_bandwidth[k] / 1000.0;
+ }
+ v->total_write_bandwidth_consumed_gbyte_per_second = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
+ v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
+ }
+ else if (v->output[k] == dcn_bw_writeback) {
+ v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
+ }
+ else {
+ v->write_bandwidth[k] = 0.0;
+ }
+ v->total_write_bandwidth_consumed_gbyte_per_second = v->total_write_bandwidth_consumed_gbyte_per_second + v->write_bandwidth[k] / 1000.0;
+ }
+ v->total_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->total_write_bandwidth_consumed_gbyte_per_second;
+ v->dcc_enabled_in_any_plane = dcn_bw_no;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->dcc_enabled_in_any_plane = dcn_bw_yes;
+ }
+ }
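+	/*
+	 * Return bandwidth per voltage state: the smaller of the DCFCLK-bound
+	 * return bus bandwidth and the fabric/DRAM bandwidth (derated by the
+	 * ideal-DRAM-bandwidth percentage), with further reductions below when
+	 * DCC is enabled and the compressed return traffic could overrun the
+	 * ROB.
+	 */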
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
+ v->return_bw_per_state[i] = v->return_bw_todcn_per_state;
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0);
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) {
+ v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ if ((v->total_read_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->return_bw_per_state[i]) && (v->total_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0)) {
+ v->bandwidth_support[i] = dcn_bw_yes;
+ }
+ else {
+ v->bandwidth_support[i] = dcn_bw_no;
+ }
+ }
+ /*writeback latency support check*/
+
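+	/*
+	 * Writeback consumes 4 bytes/pixel for 4:4:4 and 1.5 bytes/pixel for
+	 * 4:2:0; the checks below roughly require that rate to be absorbed by
+	 * the writeback luma/chroma buffers within write_back_latency.
+	 */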
+ v->writeback_latency_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444 && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0 > (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
+ v->writeback_latency_support = dcn_bw_no;
+ }
+ else if (v->output[k] == dcn_bw_writeback && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) >dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) {
+ v->writeback_latency_support = dcn_bw_no;
+ }
+ }
+ /*re-ordering buffer support check*/
+
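+	/*
+	 * ROB check: the time to drain (ROB size - one pixel chunk) at the
+	 * per-state return bandwidth must exceed the urgent round-trip plus
+	 * out-of-order return latency for that state.
+	 */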
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ v->urgent_round_trip_and_out_of_order_latency_per_state[i] = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk_per_state[i] + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw_per_state[i];
+ if ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / v->return_bw_per_state[i] > v->urgent_round_trip_and_out_of_order_latency_per_state[i]) {
+ v->rob_support[i] = dcn_bw_yes;
+ }
+ else {
+ v->rob_support[i] = dcn_bw_no;
+ }
+ }
+ /*display io support check*/
+
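+	/*
+	 * Required PHYCLK per output (as derived below): HDMI scales the pixel
+	 * bandwidth by 5/4 (10 bpc) or 3/2 (12 bpc) and divides by its 3 TMDS
+	 * channels, DP divides by its 4 lanes, and HDMI is additionally
+	 * limited to 600 (MHz units assumed) in the per-state loop.
+	 */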
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_dp && v->dsc_capability == dcn_bw_yes) {
+ if (v->output_format[k] == dcn_bw_420) {
+ v->required_output_bw = v->pixel_clock[k] / 2.0;
+ }
+ else {
+ v->required_output_bw = v->pixel_clock[k];
+ }
+ }
+ else if (v->output_format[k] == dcn_bw_420) {
+ v->required_output_bw = v->pixel_clock[k] * 3.0 / 2.0;
+ }
+ else {
+ v->required_output_bw = v->pixel_clock[k] * 3.0;
+ }
+ if (v->output[k] == dcn_bw_hdmi) {
+ v->required_phyclk[k] = v->required_output_bw;
+ switch (v->output_deep_color[k]) {
+ case dcn_bw_encoder_10bpc:
+ v->required_phyclk[k] = v->required_phyclk[k] * 5.0 / 4;
+ break;
+ case dcn_bw_encoder_12bpc:
+ v->required_phyclk[k] = v->required_phyclk[k] * 3.0 / 2;
+ break;
+ default:
+ break;
+ }
+ v->required_phyclk[k] = v->required_phyclk[k] / 3.0;
+ }
+ else if (v->output[k] == dcn_bw_dp) {
+ v->required_phyclk[k] = v->required_output_bw / 4.0;
+ }
+ else {
+ v->required_phyclk[k] = 0.0;
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ v->dio_support[i] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->required_phyclk[k] > v->phyclk_per_state[i] || (v->output[k] == dcn_bw_hdmi && v->required_phyclk[k] > 600.0)) {
+ v->dio_support[i] = dcn_bw_no;
+ }
+ }
+ }
+ /*total available writeback support check*/
+
+ v->total_number_of_active_writeback = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback) {
+ v->total_number_of_active_writeback = v->total_number_of_active_writeback + 1.0;
+ }
+ }
+ if (v->total_number_of_active_writeback <= v->max_num_writeback) {
+ v->total_available_writeback_support = dcn_bw_yes;
+ }
+ else {
+ v->total_available_writeback_support = dcn_bw_no;
+ }
+ /*maximum dispclk/dppclk support check*/
+
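+	/*
+	 * The minimum DPPCLK for a single DPP (computed below) is the pixel
+	 * clock scaled by the largest of roughly vtaps / 6, the scaling work
+	 * h_ratio * v_ratio divided by the scaler throughput factor, and 1.0,
+	 * so it is never below the pixel clock when one DPP is used; chroma
+	 * terms are added for YUV formats.
+	 */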
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > 1.0) {
+ v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->pscl_factor_chroma[k] = 0.0;
+ v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], 1.0);
+ }
+ else {
+ if (v->h_ratio[k] / 2.0 > 1.0) {
+ v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max5(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_factor_chroma[k], 1.0);
+ }
+ }
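+ /* Derive the 256-byte request block dimensions and the min/max swath heights per plane, and from those the number of DPPs needed to fit the DET and the line buffer. */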
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_block_height_y[k] = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->read256_block_height_y[k] = 4.0;
+ }
+ else {
+ v->read256_block_height_y[k] = 8.0;
+ }
+ v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
+ v->read256_block_height_c[k] = 0.0;
+ v->read256_block_width_c[k] = 0.0;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_block_height_y[k] = 1.0;
+ v->read256_block_height_c[k] = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->read256_block_height_y[k] = 16.0;
+ v->read256_block_height_c[k] = 8.0;
+ }
+ else {
+ v->read256_block_height_y[k] = 8.0;
+ v->read256_block_height_c[k] = 8.0;
+ }
+ v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k];
+ v->read256_block_width_c[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->read256_block_height_c[k];
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->max_swath_height_y[k] = v->read256_block_height_y[k];
+ v->max_swath_height_c[k] = v->read256_block_height_c[k];
+ }
+ else {
+ v->max_swath_height_y[k] = v->read256_block_width_y[k];
+ v->max_swath_height_c[k] = v->read256_block_width_c[k];
+ }
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ }
+ else {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+ }
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ else {
+ v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
+ }
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
+ v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ }
+ else {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0;
+ }
+ }
+ else {
+ v->min_swath_height_y[k] = v->max_swath_height_y[k];
+ v->min_swath_height_c[k] = v->max_swath_height_c[k];
+ }
+ }
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->maximum_swath_width = 8192.0;
+ }
+ else {
+ v->maximum_swath_width = 5120.0;
+ }
+ v->number_of_dpp_required_for_det_size =dcn_bw_ceil2(v->swath_width_ysingle_dpp[k] /dcn_bw_min2(v->maximum_swath_width, v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / (v->byte_per_pixel_in_dety[k] * v->min_swath_height_y[k] + v->byte_per_pixel_in_detc[k] / 2.0 * v->min_swath_height_c[k])), 1.0);
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->number_of_dpp_required_for_lb_size =dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0);
+ }
+ else {
+ v->number_of_dpp_required_for_lb_size =dcn_bw_max2(dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0),dcn_bw_ceil2((v->vta_pschroma[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k] / 2.0, 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0));
+ }
+ v->number_of_dpp_required_for_det_and_lb_size[k] =dcn_bw_max2(v->number_of_dpp_required_for_det_size, v->number_of_dpp_required_for_lb_size);
+ }
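+ /* For each state i and DISPCLK:DPPCLK ratio j (j == 0 is 1:1, j == 1 is 2:1), assign one or two DPPs per plane and accumulate the required DISPCLK; if the total DPP count exceeds max_num_dpp, redo the assignment with the DPP count driven only by DET/LB need and flag unsupported clocks instead. */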
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->total_number_of_active_dpp[i][j] = 0.0;
+ v->required_dispclk[i][j] = 0.0;
+ v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ if (v->odm_capability == dcn_bw_yes) {
+ v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k] / 2.0, v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ }
+ else {
+ v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ }
+ if (i < number_of_states) {
+ v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ }
+ if (v->min_dispclk_using_single_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i]) && v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
+ v->no_of_dpp[i][j][k] = 1.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
+ }
+ else if (v->min_dispclk_using_dual_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+ v->no_of_dpp[i][j][k] = 2.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+ }
+ else {
+ v->no_of_dpp[i][j][k] = 2.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+ v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+ }
+ v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+ }
+ if (v->total_number_of_active_dpp[i][j] > v->max_num_dpp) {
+ v->total_number_of_active_dpp[i][j] = 0.0;
+ v->required_dispclk[i][j] = 0.0;
+ v->dispclk_dppclk_support[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0);
+ if (i < number_of_states) {
+ v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0);
+ }
+ if (v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) {
+ v->no_of_dpp[i][j][k] = 1.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp);
+ if (v->min_dispclk_using_single_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+ v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+ }
+ }
+ else {
+ v->no_of_dpp[i][j][k] = 2.0;
+ v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp);
+ if (v->min_dispclk_using_dual_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) {
+ v->dispclk_dppclk_support[i][j] = dcn_bw_no;
+ }
+ }
+ v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+ }
+ }
+ }
+ }
+ /*viewport size check*/
+
+ v->viewport_size_support = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->number_of_dpp_required_for_det_and_lb_size[k] > 2.0) {
+ v->viewport_size_support = dcn_bw_no;
+ }
+ }
+ /*total available pipes support check*/
+
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ if (v->total_number_of_active_dpp[i][j] <= v->max_num_dpp) {
+ v->total_available_pipes_support[i][j] = dcn_bw_yes;
+ }
+ else {
+ v->total_available_pipes_support[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ /*urgent latency support check*/
+
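+ /* Per state/ratio/plane: use the maximum swath height if a full luma+chroma swath fits in half the DET, otherwise the minimum, then compute how long the DET and line buffer contents can hide an urgent request (urgent_latency_support_us). */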
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->swath_width_yper_state[i][j][k] = v->swath_width_ysingle_dpp[k] / v->no_of_dpp[i][j][k];
+ v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->max_swath_height_y[k];
+ v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pixel_in_dety[k] * v->max_swath_height_y[k];
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
+ }
+ if (v->max_swath_height_c[k] > 0.0) {
+ v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k];
+ }
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k];
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
+ if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
+ v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k];
+ v->swath_height_cper_state[i][j][k] = v->max_swath_height_c[k];
+ }
+ else {
+ v->swath_height_yper_state[i][j][k] = v->min_swath_height_y[k];
+ v->swath_height_cper_state[i][j][k] = v->min_swath_height_c[k];
+ }
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+ v->lines_in_det_chroma = 0.0;
+ }
+ else if (v->swath_height_yper_state[i][j][k] <= v->swath_height_cper_state[i][j][k]) {
+ v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+ v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_detc[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
+ }
+ else {
+ v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k];
+ v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0);
+ }
+ v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
+ v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+ v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]);
+ v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]);
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]));
+ }
+ }
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->urgent_latency_support[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->urgent_latency_support_us_per_state[i][j][k] < v->urgent_latency) {
+ v->urgent_latency_support[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ }
+ /*prefetch check*/
+
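+ /* The prefetch check sizes DCC metadata and PTE rows, derives prefetch line counts and the line budget available before vstartup, and verifies the resulting bandwidth with and without immediate flip. */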
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->total_number_of_dcc_active_dpp[i][j] = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->total_number_of_dcc_active_dpp[i][j] = v->total_number_of_dcc_active_dpp[i][j] + v->no_of_dpp[i][j][k];
+ }
+ }
+ }
+ }
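+ /* Projected deep-sleep DCFCLK: starts at 8, raised to at least pixel_clock/16 and the per-pipe luma/chroma fetch-rate terms. */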
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->projected_dcfclk_deep_sleep = 8.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, v->pixel_clock[k] / 16.0);
+ if (v->byte_per_pixel_in_detc[k] == 0.0) {
+ if (v->v_ratio[k] <= 1.0) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
+ }
+ }
+ else {
+ if (v->v_ratio[k] <= 1.0) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j));
+ }
+ if (v->v_ratio[k] / 2.0 <= 1.0) {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->h_ratio[k] / 2.0 * v->pixel_clock[k] / v->no_of_dpp[i][j][k]);
+ }
+ else {
+ v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->pscl_factor_chroma[k] * v->required_dispclk[i][j] / (1 + j));
+ }
+ }
+ }
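+ /* Per plane: DCC meta surface and PTE request sizing, meta/dpte bytes per row, prefetch line counts for luma and chroma, the line budget left before vstartup after calc/setup time, and the prefetch bandwidth that budget implies. */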
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_req_height_y = 8.0 * v->read256_block_height_y[k];
+ v->meta_req_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->meta_req_height_y;
+ v->meta_surface_width_y =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0, v->meta_req_width_y) + v->meta_req_width_y;
+ v->meta_surface_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, v->meta_req_height_y) + v->meta_req_height_y;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_per_frame_y = (dcn_bw_ceil2((v->meta_surface_width_y * v->meta_surface_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_per_frame_y = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_bytes_y = v->meta_surface_width_y * v->meta_req_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
+ }
+ else {
+ v->meta_row_bytes_y = v->meta_surface_height_y * v->meta_req_width_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_per_frame_y = 0.0;
+ v->meta_row_bytes_y = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_block_size_bytes_y = 256.0;
+ v->macro_tile_block_height_y = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_block_size_bytes_y = 4096.0;
+ v->macro_tile_block_height_y = 4.0 * v->read256_block_height_y[k];
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_block_size_bytes_y = 64.0 * 1024;
+ v->macro_tile_block_height_y = 16.0 * v->read256_block_height_y[k];
+ }
+ else {
+ v->macro_tile_block_size_bytes_y = 256.0 * 1024;
+ v->macro_tile_block_height_y = 32.0 * v->read256_block_height_y[k];
+ }
+ if (v->macro_tile_block_size_bytes_y <= 65536.0) {
+ v->data_pte_req_height_y = v->macro_tile_block_height_y;
+ }
+ else {
+ v->data_pte_req_height_y = 16.0 * v->read256_block_height_y[k];
+ }
+ v->data_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->data_pte_req_height_y * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_y / (v->viewport_width[k] / v->no_of_dpp[i][j][k]), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0) / v->data_pte_req_width_y, 1.0) + 1);
+ }
+ else {
+ v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->data_pte_req_height_y, 1.0) + 1);
+ }
+ }
+ else {
+ v->dpte_bytes_per_row_y = 0.0;
+ }
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_req_height_c = 8.0 * v->read256_block_height_c[k];
+ v->meta_req_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->meta_req_height_c;
+ v->meta_surface_width_c =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0, v->meta_req_width_c) + v->meta_req_width_c;
+ v->meta_surface_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, v->meta_req_height_c) + v->meta_req_height_c;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_per_frame_c = (dcn_bw_ceil2((v->meta_surface_width_c * v->meta_surface_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_per_frame_c = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_bytes_c = v->meta_surface_width_c * v->meta_req_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
+ }
+ else {
+ v->meta_row_bytes_c = v->meta_surface_height_c * v->meta_req_width_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_per_frame_c = 0.0;
+ v->meta_row_bytes_c = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_block_size_bytes_c = 256.0;
+ v->macro_tile_block_height_c = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_block_size_bytes_c = 4096.0;
+ v->macro_tile_block_height_c = 4.0 * v->read256_block_height_c[k];
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_block_size_bytes_c = 64.0 * 1024;
+ v->macro_tile_block_height_c = 16.0 * v->read256_block_height_c[k];
+ }
+ else {
+ v->macro_tile_block_size_bytes_c = 256.0 * 1024;
+ v->macro_tile_block_height_c = 32.0 * v->read256_block_height_c[k];
+ }
+ v->macro_tile_block_width_c = v->macro_tile_block_size_bytes_c /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->macro_tile_block_height_c;
+ if (v->macro_tile_block_size_bytes_c <= 65536.0) {
+ v->data_pte_req_height_c = v->macro_tile_block_height_c;
+ }
+ else {
+ v->data_pte_req_height_c = 16.0 * v->read256_block_height_c[k];
+ }
+ v->data_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->data_pte_req_height_c * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_c / (v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0) / v->data_pte_req_width_c, 1.0) + 1);
+ }
+ else {
+ v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->data_pte_req_height_c, 1.0) + 1);
+ }
+ }
+ else {
+ v->dpte_bytes_per_row_c = 0.0;
+ }
+ }
+ else {
+ v->dpte_bytes_per_row_c = 0.0;
+ v->meta_pte_bytes_per_frame_c = 0.0;
+ v->meta_row_bytes_c = 0.0;
+ }
+ v->dpte_bytes_per_row[k] = v->dpte_bytes_per_row_y + v->dpte_bytes_per_row_c;
+ v->meta_pte_bytes_per_frame[k] = v->meta_pte_bytes_per_frame_y + v->meta_pte_bytes_per_frame_c;
+ v->meta_row_bytes[k] = v->meta_row_bytes_y + v->meta_row_bytes_c;
+ v->v_init_y = (v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0;
+ v->prefill_y[k] =dcn_bw_floor2(v->v_init_y, 1.0);
+ v->max_num_sw_y[k] =dcn_bw_ceil2((v->prefill_y[k] - 1.0) / v->swath_height_yper_state[i][j][k], 1.0) + 1;
+ if (v->prefill_y[k] > 1.0) {
+ v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] - 2.0), v->swath_height_yper_state[i][j][k]);
+ }
+ else {
+ v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] + v->swath_height_yper_state[i][j][k] - 2.0), v->swath_height_yper_state[i][j][k]);
+ }
+ v->max_partial_sw_y =dcn_bw_max2(1.0, v->max_partial_sw_y);
+ v->prefetch_lines_y[k] = v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k] + v->max_partial_sw_y;
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ v->v_init_c = (v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0;
+ v->prefill_c[k] =dcn_bw_floor2(v->v_init_c, 1.0);
+ v->max_num_sw_c[k] =dcn_bw_ceil2((v->prefill_c[k] - 1.0) / v->swath_height_cper_state[i][j][k], 1.0) + 1;
+ if (v->prefill_c[k] > 1.0) {
+ v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] - 2.0), v->swath_height_cper_state[i][j][k]);
+ }
+ else {
+ v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] + v->swath_height_cper_state[i][j][k] - 2.0), v->swath_height_cper_state[i][j][k]);
+ }
+ v->max_partial_sw_c =dcn_bw_max2(1.0, v->max_partial_sw_c);
+ v->prefetch_lines_c[k] = v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k] + v->max_partial_sw_c;
+ }
+ else {
+ v->prefetch_lines_c[k] = 0.0;
+ }
+ v->dst_x_after_scaler = 90.0 * v->pixel_clock[k] / (v->required_dispclk[i][j] / (j + 1)) + 42.0 * v->pixel_clock[k] / v->required_dispclk[i][j];
+ if (v->no_of_dpp[i][j][k] > 1.0) {
+ v->dst_x_after_scaler = v->dst_x_after_scaler + v->scaler_rec_out_width[k] / 2.0;
+ }
+ if (v->output_format[k] == dcn_bw_420) {
+ v->dst_y_after_scaler = 1.0;
+ }
+ else {
+ v->dst_y_after_scaler = 0.0;
+ }
+ v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep;
+ v->v_update_offset[k] =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]);
+ v->v_update_width[k] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k];
+ v->v_ready_offset[k] =dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k];
+ v->time_setup = (v->v_update_offset[k] + v->v_update_width[k] + v->v_ready_offset[k]) / v->pixel_clock[k];
+ v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / v->return_bw_per_state[i];
+ if (v->pte_enable == dcn_bw_yes) {
+ v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i];
+ }
+ if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
+ v->maximum_vstartup = v->vtotal[k] - v->vactive[k] - 1.0;
+ }
+ else {
+ v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0;
+ }
+ v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]);
+ v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4;
+ v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
+ }
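+ /* Bandwidth left for immediate flips is the state's return bandwidth minus each plane's larger of active read and prefetch bandwidth; only non-4:2:0 planes contribute flip bytes (meta pte, meta row and dpte row). */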
+ v->bw_available_for_immediate_flip = v->return_bw_per_state[i];
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->bw_available_for_immediate_flip = v->bw_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth[k], v->prefetch_bw[k]);
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->total_immediate_flip_bytes[k] = 0.0;
+ if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->total_immediate_flip_bytes[k] = v->total_immediate_flip_bytes[k] + v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k];
+ }
+ }
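+ /* Split the prefetch budget into time for the meta PTEs and for the meta/dpte rows (with and without immediate flip), convert to line counts, and derive the prefetch v-ratio and required pixel data bandwidth; 999999.0 marks an infeasible prefetch. */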
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
+ v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ v->time_for_meta_pte_without_immediate_flip =dcn_bw_max3(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], v->extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ }
+ else {
+ v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
+ v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0;
+ }
+ if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) {
+ v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency);
+ v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max3((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency);
+ }
+ else {
+ v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip);
+ v->time_for_meta_and_dpte_row_without_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, v->extra_latency - v->time_for_meta_pte_without_immediate_flip);
+ }
+ v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k];
+ v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k];
+ if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+ if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywith_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+ if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwith_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0;
+ v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0;
+ v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = 999999.0;
+ }
+ if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip > 0.0) {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+ if ((v->swath_height_yper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywithout_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+ if ((v->swath_height_cper_state[i][j][k] > 4.0)) {
+ if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) {
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwithout_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
+ v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0;
+ v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0;
+ v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = 999999.0;
+ }
+ }
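+ /* A state/ratio supports prefetch only if the summed worst-case read bandwidth stays within the return bandwidth, at least two line times remain for prefetch, and the pte/meta transfers need fewer than 8/16 lines; 4:2:0 planes are summed with the no-flip numbers, matching the flip-byte accounting above. */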
+ v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k]) +dcn_bw_max2(v->meta_pte_bytes_per_frame[k] / (v->lines_for_meta_pte_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / (v->lines_for_meta_and_dpte_row_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]));
+ }
+ else {
+ v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
+ }
+ }
+ v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = v->maximum_read_bandwidth_with_prefetch_without_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]);
+ }
+ v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
+ if (v->maximum_read_bandwidth_with_prefetch_with_immediate_flip > v->return_bw_per_state[i]) {
+ v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_with_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_with_immediate_flip[k] >= 16.0) {
+ v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
+ if (v->maximum_read_bandwidth_with_prefetch_without_immediate_flip > v->return_bw_per_state[i]) {
+ v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_without_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_without_immediate_flip[k] >= 16.0) {
+ v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ }
+ for (i = 0; i <= number_of_states_plus_one; i++) {
+ for (j = 0; j <= 1; j++) {
+ v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywith_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwith_immediate_flip[i][j][k] > 4.0)) || ((v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 || v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)))) {
+ v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)) {
+ v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ }
+ /*mode support, voltage state and soc configuration*/
+
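+ /* A state/ratio pair supports the mode only if every check above passed. The chosen voltage level is the lowest supported state at or above voltage_override_level; if only the top fallback state supports immediate flip, immediate flip is reported unsupported and the no-flip level is used. */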
+ for (i = number_of_states_plus_one; i >= 0; i--) {
+ for (j = 0; j <= 1; j++) {
+ if (v->scale_ratio_support == dcn_bw_yes && v->source_format_pixel_and_scan_support == dcn_bw_yes && v->viewport_size_support == dcn_bw_yes && v->bandwidth_support[i] == dcn_bw_yes && v->dio_support[i] == dcn_bw_yes && v->urgent_latency_support[i][j] == dcn_bw_yes && v->rob_support[i] == dcn_bw_yes && v->dispclk_dppclk_support[i][j] == dcn_bw_yes && v->total_available_pipes_support[i][j] == dcn_bw_yes && v->total_available_writeback_support == dcn_bw_yes && v->writeback_latency_support == dcn_bw_yes) {
+ if (v->prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes) {
+ v->mode_support_with_immediate_flip[i][j] = dcn_bw_yes;
+ }
+ else {
+ v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
+ }
+ if (v->prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes) {
+ v->mode_support_without_immediate_flip[i][j] = dcn_bw_yes;
+ }
+ else {
+ v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ else {
+ v->mode_support_with_immediate_flip[i][j] = dcn_bw_no;
+ v->mode_support_without_immediate_flip[i][j] = dcn_bw_no;
+ }
+ }
+ }
+ for (i = number_of_states_plus_one; i >= 0; i--) {
+ if ((i == number_of_states_plus_one || v->mode_support_with_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_with_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
+ v->voltage_level_with_immediate_flip = i;
+ }
+ }
+ for (i = number_of_states_plus_one; i >= 0; i--) {
+ if ((i == number_of_states_plus_one || v->mode_support_without_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_without_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) {
+ v->voltage_level_without_immediate_flip = i;
+ }
+ }
+ if (v->voltage_level_with_immediate_flip == number_of_states_plus_one) {
+ v->immediate_flip_supported = dcn_bw_no;
+ v->voltage_level = v->voltage_level_without_immediate_flip;
+ }
+ else {
+ v->immediate_flip_supported = dcn_bw_yes;
+ v->voltage_level = v->voltage_level_with_immediate_flip;
+ }
+ v->dcfclk = v->dcfclk_per_state[v->voltage_level];
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
+ for (j = 0; j <= 1; j++) {
+ v->required_dispclk_per_ratio[j] = v->required_dispclk[v->voltage_level][j];
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dpp_per_plane_per_ratio[j][k] = v->no_of_dpp[v->voltage_level][j][k];
+ }
+ v->dispclk_dppclk_support_per_ratio[j] = v->dispclk_dppclk_support[v->voltage_level][j];
+ }
+ v->max_phyclk = v->phyclk_per_state[v->voltage_level];
+}
+void display_pipe_configuration(struct dcn_bw_internal_vars *v)
+{
+ int j;
+ int k;
+ /*display pipe configuration*/
+
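+ /* Pick the DISPCLK:DPPCLK ratio (1 or 2): prefer the ratio that is supported, uses fewer DPPs, or needs no more than half the DISPCLK; then recompute swath heights for that configuration and split the DET between luma and chroma. */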
+ for (j = 0; j <= 1; j++) {
+ v->total_number_of_active_dpp_per_ratio[j] = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->total_number_of_active_dpp_per_ratio[j] = v->total_number_of_active_dpp_per_ratio[j] + v->dpp_per_plane_per_ratio[j][k];
+ }
+ }
+ if ((v->dispclk_dppclk_support_per_ratio[0] == dcn_bw_yes && v->dispclk_dppclk_support_per_ratio[1] == dcn_bw_no) || (v->dispclk_dppclk_support_per_ratio[0] == v->dispclk_dppclk_support_per_ratio[1] && (v->total_number_of_active_dpp_per_ratio[0] < v->total_number_of_active_dpp_per_ratio[1] || (((v->total_number_of_active_dpp_per_ratio[0] == v->total_number_of_active_dpp_per_ratio[1]) && v->required_dispclk_per_ratio[0] <= 0.5 * v->required_dispclk_per_ratio[1]))))) {
+ v->dispclk_dppclk_ratio = 1;
+ v->final_error_message = v->error_message[0];
+ }
+ else {
+ v->dispclk_dppclk_ratio = 2;
+ v->final_error_message = v->error_message[1];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dpp_per_plane[k] = v->dpp_per_plane_per_ratio[v->dispclk_dppclk_ratio - 1][k];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pix_dety = 8.0;
+ v->byte_per_pix_detc = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pix_dety = 4.0;
+ v->byte_per_pix_detc = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pix_dety = 2.0;
+ v->byte_per_pix_detc = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pix_dety = 1.0;
+ v->byte_per_pix_detc = 2.0;
+ }
+ else {
+ v->byte_per_pix_dety = 4.0f / 3.0f;
+ v->byte_per_pix_detc = 8.0f / 3.0f;
+ }
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_bytes_block_height_y = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->read256_bytes_block_height_y = 4.0;
+ }
+ else {
+ v->read256_bytes_block_height_y = 8.0;
+ }
+ v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
+ v->read256_bytes_block_height_c = 0.0;
+ v->read256_bytes_block_width_c = 0.0;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->read256_bytes_block_height_y = 1.0;
+ v->read256_bytes_block_height_c = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->read256_bytes_block_height_y = 16.0;
+ v->read256_bytes_block_height_c = 8.0;
+ }
+ else {
+ v->read256_bytes_block_height_y = 8.0;
+ v->read256_bytes_block_height_c = 8.0;
+ }
+ v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y;
+ v->read256_bytes_block_width_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->read256_bytes_block_height_c;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->maximum_swath_height_y = v->read256_bytes_block_height_y;
+ v->maximum_swath_height_c = v->read256_bytes_block_height_c;
+ }
+ else {
+ v->maximum_swath_height_y = v->read256_bytes_block_width_y;
+ v->maximum_swath_height_c = v->read256_bytes_block_width_c;
+ }
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ }
+ else {
+ v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+ }
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ else {
+ v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
+ }
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) {
+ v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0;
+ if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ }
+ else {
+ v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0;
+ }
+ }
+ else {
+ v->minimum_swath_height_y = v->maximum_swath_height_y;
+ v->minimum_swath_height_c = v->maximum_swath_height_c;
+ }
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->swath_width = v->viewport_width[k] / v->dpp_per_plane[k];
+ }
+ else {
+ v->swath_width = v->viewport_height[k] / v->dpp_per_plane[k];
+ }
+ v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->maximum_swath_height_y;
+ v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pix_dety * v->maximum_swath_height_y;
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256;
+ }
+ if (v->maximum_swath_height_c > 0.0) {
+ v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c;
+ }
+ v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c;
+ if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) {
+ v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256;
+ }
+ if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) {
+ v->swath_height_y[k] = v->maximum_swath_height_y;
+ v->swath_height_c[k] = v->maximum_swath_height_c;
+ }
+ else {
+ v->swath_height_y[k] = v->minimum_swath_height_y;
+ v->swath_height_c[k] = v->minimum_swath_height_c;
+ }
+ if (v->swath_height_c[k] == 0.0) {
+ v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0;
+ v->det_buffer_size_c[k] = 0.0;
+ }
+ else if (v->swath_height_y[k] <= v->swath_height_c[k]) {
+ v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
+ v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0;
+ }
+ else {
+ v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0;
+ v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 3.0;
+ }
+ }
+}
+void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(struct dcn_bw_internal_vars *v)
+{
+ int k;
+ /*dispclk and dppclk calculation*/
+
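+ /* Final DISPCLK from the per-plane DPPCLK requirement (PSCL throughput), with downspread and ramping margin applied; the value is clamped to the top state's DISPCLK only when just the ramping margin pushes it over. DPPCLK is DISPCLK divided by the chosen ratio. */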
+ v->dispclk_with_ramping = 0.0;
+ v->dispclk_without_ramping = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->h_ratio[k] > 1.0) {
+ v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ v->dppclk_using_single_dpp_luma = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_throughput[k], 1.0);
+ if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->pscl_throughput_chroma[k] = 0.0;
+ v->dppclk_using_single_dpp = v->dppclk_using_single_dpp_luma;
+ }
+ else {
+ if (v->h_ratio[k] > 1.0) {
+ v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0));
+ }
+ else {
+ v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput);
+ }
+ v->dppclk_using_single_dpp_chroma = v->pixel_clock[k] *dcn_bw_max3(v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_throughput_chroma[k], 1.0);
+ v->dppclk_using_single_dpp =dcn_bw_max2(v->dppclk_using_single_dpp_luma, v->dppclk_using_single_dpp_chroma);
+ }
+ if (v->odm_capable == dcn_bw_yes) {
+ v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
+ v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0));
+ }
+ else {
+ v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0));
+ v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0));
+ }
+ }
+ if (v->dispclk_without_ramping > v->max_dispclk[number_of_states]) {
+ v->dispclk = v->dispclk_without_ramping;
+ }
+ else if (v->dispclk_with_ramping > v->max_dispclk[number_of_states]) {
+ v->dispclk = v->max_dispclk[number_of_states];
+ }
+ else {
+ v->dispclk = v->dispclk_with_ramping;
+ }
+ v->dppclk = v->dispclk / v->dispclk_dppclk_ratio;
+ /*urgent watermark*/
+
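+ /* Return bandwidth is capped by the critical-compression terms when DCC is enabled; the urgent watermark adds the worst last-pixel-of-line delivery shortfall and the chunk-transfer extra latency to the urgent latency, and the pte/meta watermark adds two more urgent latencies. */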
+ v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0);
+ v->dcc_enabled_any_plane = dcn_bw_no;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->dcc_enabled_any_plane = dcn_bw_yes;
+ }
+ }
+ v->return_bw = v->return_bandwidth_to_dcn;
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0);
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency)));
+ }
+ v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0);
+ if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) {
+ v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2));
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k];
+ }
+ else {
+ v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k];
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->byte_per_pixel_dety[k] = 8.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) {
+ v->byte_per_pixel_dety[k] = 4.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) {
+ v->byte_per_pixel_dety[k] = 2.0;
+ v->byte_per_pixel_detc[k] = 0.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->byte_per_pixel_dety[k] = 1.0;
+ v->byte_per_pixel_detc[k] = 2.0;
+ }
+ else {
+ v->byte_per_pixel_dety[k] = 4.0f / 3.0f;
+ v->byte_per_pixel_detc[k] = 8.0f / 3.0f;
+ }
+ }
+ v->total_data_read_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k];
+ v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0;
+ v->total_data_read_bandwidth = v->total_data_read_bandwidth + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k];
+ }
+ v->total_active_dpp = 0.0;
+ v->total_dcc_active_dpp = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->total_active_dpp = v->total_active_dpp + v->dpp_per_plane[k];
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->total_dcc_active_dpp = v->total_dcc_active_dpp + v->dpp_per_plane[k];
+ }
+ }
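+ /*urgent watermark*/
+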
+ v->urgent_round_trip_and_out_of_order_latency = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw;
+ v->last_pixel_of_line_extra_watermark = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->v_ratio[k] <= 1.0) {
+ v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+ }
+ v->data_fabric_line_delivery_time_luma = v->swath_width_y[k] * v->swath_height_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->return_bw * v->read_bandwidth_plane_luma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
+ v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_luma - v->display_pipe_line_delivery_time_luma[k]);
+ if (v->byte_per_pixel_detc[k] == 0.0) {
+ v->display_pipe_line_delivery_time_chroma[k] = 0.0;
+ }
+ else {
+ if (v->v_ratio[k] / 2.0 <= 1.0) {
+ v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] / (v->h_ratio[k] / 2.0) / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 / v->pscl_throughput_chroma[k] / v->dppclk;
+ }
+ v->data_fabric_line_delivery_time_chroma = v->swath_width_y[k] / 2.0 * v->swath_height_c[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->return_bw * v->read_bandwidth_plane_chroma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth);
+ v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_chroma - v->display_pipe_line_delivery_time_chroma[k]);
+ }
+ }
+ v->urgent_extra_latency = v->urgent_round_trip_and_out_of_order_latency + (v->total_active_dpp * v->pixel_chunk_size_in_kbyte + v->total_dcc_active_dpp * v->meta_chunk_size) * 1024.0 / v->return_bw;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->urgent_extra_latency = v->urgent_extra_latency + v->total_active_dpp * v->pte_chunk_size * 1024.0 / v->return_bw;
+ }
+ v->urgent_watermark = v->urgent_latency + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
+ v->ptemeta_urgent_watermark = v->urgent_watermark + 2.0 * v->urgent_latency;
+ /*nb p-state/dram clock change watermark*/
+
+ v->dram_clock_change_watermark = v->dram_clock_change_latency + v->urgent_watermark;
+ v->total_active_writeback = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback) {
+ v->total_active_writeback = v->total_active_writeback + 1.0;
+ }
+ }
+ if (v->total_active_writeback <= 1.0) {
+ v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency;
+ }
+ else {
+ v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency + v->writeback_chunk_size * 1024.0 / 32.0 / v->socclk;
+ }
+ /*stutter efficiency*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->lines_in_dety[k] = v->det_buffer_size_y[k] / v->byte_per_pixel_dety[k] / v->swath_width_y[k];
+ v->lines_in_dety_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_dety[k], v->swath_height_y[k]);
+ v->full_det_buffering_time_y[k] = v->lines_in_dety_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k];
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->lines_in_detc[k] = v->det_buffer_size_c[k] / v->byte_per_pixel_detc[k] / (v->swath_width_y[k] / 2.0);
+ v->lines_in_detc_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_detc[k], v->swath_height_c[k]);
+ v->full_det_buffering_time_c[k] = v->lines_in_detc_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0);
+ }
+ else {
+ v->lines_in_detc[k] = 0.0;
+ v->lines_in_detc_rounded_down_to_swath[k] = 0.0;
+ v->full_det_buffering_time_c[k] = 999999.0;
+ }
+ }
+ v->min_full_det_buffering_time = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->full_det_buffering_time_y[k] < v->min_full_det_buffering_time) {
+ v->min_full_det_buffering_time = v->full_det_buffering_time_y[k];
+ v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
+ }
+ if (v->full_det_buffering_time_c[k] < v->min_full_det_buffering_time) {
+ v->min_full_det_buffering_time = v->full_det_buffering_time_c[k];
+ v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k];
+ }
+ }
+ v->average_read_bandwidth_gbyte_per_second = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / v->dcc_rate[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / v->dcc_rate[k] / 1000.0;
+ }
+ else {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / 1000.0;
+ }
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 256.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 256.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 512.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 512.0;
+ }
+ }
+ v->part_of_burst_that_fits_in_rob =dcn_bw_min2(v->min_full_det_buffering_time * v->total_data_read_bandwidth, v->rob_buffer_size_in_kbyte * 1024.0 * v->total_data_read_bandwidth / (v->average_read_bandwidth_gbyte_per_second * 1000.0));
+ v->stutter_burst_time = v->part_of_burst_that_fits_in_rob * (v->average_read_bandwidth_gbyte_per_second * 1000.0) / v->total_data_read_bandwidth / v->return_bw + (v->min_full_det_buffering_time * v->total_data_read_bandwidth - v->part_of_burst_that_fits_in_rob) / (v->dcfclk * 64.0);
+ if (v->total_active_writeback == 0.0) {
+ v->stutter_efficiency_not_including_vblank = (1.0 - (v->sr_exit_time + v->stutter_burst_time) / v->min_full_det_buffering_time) * 100.0;
+ }
+ else {
+ v->stutter_efficiency_not_including_vblank = 0.0;
+ }
+ v->smallest_vblank = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
+ v->v_blank_time = (v->vtotal[k] - v->vactive[k]) * v->htotal[k] / v->pixel_clock[k];
+ }
+ else {
+ v->v_blank_time = 0.0;
+ }
+ v->smallest_vblank =dcn_bw_min2(v->smallest_vblank, v->v_blank_time);
+ }
+ v->stutter_efficiency = (v->stutter_efficiency_not_including_vblank / 100.0 * (v->frame_time_for_min_full_det_buffering_time - v->smallest_vblank) + v->smallest_vblank) / v->frame_time_for_min_full_det_buffering_time * 100.0;
+ /*dcfclk deep sleep*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 32.0 / v->display_pipe_line_delivery_time_luma[k], 1.1 * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 32.0 / v->display_pipe_line_delivery_time_chroma[k]);
+ }
+ else {
+ v->dcfclk_deep_sleep_per_plane[k] = 1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 64.0 / v->display_pipe_line_delivery_time_luma[k];
+ }
+ v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(v->dcfclk_deep_sleep_per_plane[k], v->pixel_clock[k] / 16.0);
+ }
+ v->dcf_clk_deep_sleep = 8.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dcf_clk_deep_sleep =dcn_bw_max2(v->dcf_clk_deep_sleep, v->dcfclk_deep_sleep_per_plane[k]);
+ }
+ /*stutter watermark*/
+
+ v->stutter_exit_watermark = v->sr_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency + 10.0 / v->dcf_clk_deep_sleep;
+ v->stutter_enter_plus_exit_watermark = v->sr_enter_plus_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency;
+ /*urgent latency supported*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->effective_det_plus_lb_lines_luma =dcn_bw_floor2(v->lines_in_dety[k] +dcn_bw_min2(v->lines_in_dety[k] * v->dppclk * v->byte_per_pixel_dety[k] * v->pscl_throughput[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_y[k]);
+ v->urgent_latency_support_us_luma = v->effective_det_plus_lb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_det_plus_lb_lines_luma * v->swath_width_y[k] * v->byte_per_pixel_dety[k] / (v->return_bw / v->dpp_per_plane[k]);
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->effective_det_plus_lb_lines_chroma =dcn_bw_floor2(v->lines_in_detc[k] +dcn_bw_min2(v->lines_in_detc[k] * v->dppclk * v->byte_per_pixel_detc[k] * v->pscl_throughput_chroma[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_c[k]);
+ v->urgent_latency_support_us_chroma = v->effective_det_plus_lb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_det_plus_lb_lines_chroma * (v->swath_width_y[k] / 2.0) * v->byte_per_pixel_detc[k] / (v->return_bw / v->dpp_per_plane[k]);
+ v->urgent_latency_support_us[k] =dcn_bw_min2(v->urgent_latency_support_us_luma, v->urgent_latency_support_us_chroma);
+ }
+ else {
+ v->urgent_latency_support_us[k] = v->urgent_latency_support_us_luma;
+ }
+ }
+ v->min_urgent_latency_support_us = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->min_urgent_latency_support_us =dcn_bw_min2(v->min_urgent_latency_support_us, v->urgent_latency_support_us[k]);
+ }
+ /*non-urgent latency tolerance*/
+
+ v->non_urgent_latency_tolerance = v->min_urgent_latency_support_us - v->urgent_watermark;
+ /*prefetch*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->block_height256_bytes_y = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) {
+ v->block_height256_bytes_y = 4.0;
+ }
+ else {
+ v->block_height256_bytes_y = 8.0;
+ }
+ v->block_height256_bytes_c = 0.0;
+ }
+ else {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->block_height256_bytes_y = 1.0;
+ v->block_height256_bytes_c = 1.0;
+ }
+ else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) {
+ v->block_height256_bytes_y = 16.0;
+ v->block_height256_bytes_c = 8.0;
+ }
+ else {
+ v->block_height256_bytes_y = 8.0;
+ v->block_height256_bytes_c = 8.0;
+ }
+ }
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_request_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (8.0 * v->block_height256_bytes_y);
+ v->meta_surf_width_y =dcn_bw_ceil2(v->swath_width_y[k] - 1.0, v->meta_request_width_y) + v->meta_request_width_y;
+ v->meta_surf_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, 8.0 * v->block_height256_bytes_y) + 8.0 * v->block_height256_bytes_y;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_frame_y = (dcn_bw_ceil2((v->meta_surf_width_y * v->meta_surf_height_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_frame_y = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_byte_y = v->meta_surf_width_y * 8.0 * v->block_height256_bytes_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
+ }
+ else {
+ v->meta_row_byte_y = v->meta_surf_height_y * v->meta_request_width_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_frame_y = 0.0;
+ v->meta_row_byte_y = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_size_byte_y = 256.0;
+ v->macro_tile_height_y = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_size_byte_y = 4096.0;
+ v->macro_tile_height_y = 4.0 * v->block_height256_bytes_y;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_size_byte_y = 64.0 * 1024;
+ v->macro_tile_height_y = 16.0 * v->block_height256_bytes_y;
+ }
+ else {
+ v->macro_tile_size_byte_y = 256.0 * 1024;
+ v->macro_tile_height_y = 32.0 * v->block_height256_bytes_y;
+ }
+ if (v->macro_tile_size_byte_y <= 65536.0) {
+ v->pixel_pte_req_height_y = v->macro_tile_height_y;
+ }
+ else {
+ v->pixel_pte_req_height_y = 16.0 * v->block_height256_bytes_y;
+ }
+ v->pixel_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / v->pixel_pte_req_height_y * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_y / v->swath_width_y[k], 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1);
+ }
+ else {
+ v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->pixel_pte_req_height_y, 1.0) + 1);
+ }
+ }
+ else {
+ v->pixel_pte_bytes_per_row_y = 0.0;
+ }
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ if (v->dcc_enable[k] == dcn_bw_yes) {
+ v->meta_request_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (8.0 * v->block_height256_bytes_c);
+ v->meta_surf_width_c =dcn_bw_ceil2(v->swath_width_y[k] / 2.0 - 1.0, v->meta_request_width_c) + v->meta_request_width_c;
+ v->meta_surf_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, 8.0 * v->block_height256_bytes_c) + 8.0 * v->block_height256_bytes_c;
+ if (v->pte_enable == dcn_bw_yes) {
+ v->meta_pte_bytes_frame_c = (dcn_bw_ceil2((v->meta_surf_width_c * v->meta_surf_height_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0;
+ }
+ else {
+ v->meta_pte_bytes_frame_c = 0.0;
+ }
+ if (v->source_scan[k] == dcn_bw_hor) {
+ v->meta_row_byte_c = v->meta_surf_width_c * 8.0 * v->block_height256_bytes_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
+ }
+ else {
+ v->meta_row_byte_c = v->meta_surf_height_c * v->meta_request_width_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0;
+ }
+ }
+ else {
+ v->meta_pte_bytes_frame_c = 0.0;
+ v->meta_row_byte_c = 0.0;
+ }
+ if (v->pte_enable == dcn_bw_yes) {
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->macro_tile_size_bytes_c = 256.0;
+ v->macro_tile_height_c = 1.0;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) {
+ v->macro_tile_size_bytes_c = 4096.0;
+ v->macro_tile_height_c = 4.0 * v->block_height256_bytes_c;
+ }
+ else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) {
+ v->macro_tile_size_bytes_c = 64.0 * 1024;
+ v->macro_tile_height_c = 16.0 * v->block_height256_bytes_c;
+ }
+ else {
+ v->macro_tile_size_bytes_c = 256.0 * 1024;
+ v->macro_tile_height_c = 32.0 * v->block_height256_bytes_c;
+ }
+ if (v->macro_tile_size_bytes_c <= 65536.0) {
+ v->pixel_pte_req_height_c = v->macro_tile_height_c;
+ }
+ else {
+ v->pixel_pte_req_height_c = 16.0 * v->block_height256_bytes_c;
+ }
+ v->pixel_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / v->pixel_pte_req_height_c * 8;
+ if (v->source_surface_mode[k] == dcn_bw_sw_linear) {
+ v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_c / (v->swath_width_y[k] / 2.0), 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
+ }
+ else if (v->source_scan[k] == dcn_bw_hor) {
+ v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1);
+ }
+ else {
+ v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->pixel_pte_req_height_c, 1.0) + 1);
+ }
+ }
+ else {
+ v->pixel_pte_bytes_per_row_c = 0.0;
+ }
+ }
+ else {
+ v->pixel_pte_bytes_per_row_c = 0.0;
+ v->meta_pte_bytes_frame_c = 0.0;
+ v->meta_row_byte_c = 0.0;
+ }
+ v->pixel_pte_bytes_per_row[k] = v->pixel_pte_bytes_per_row_y + v->pixel_pte_bytes_per_row_c;
+ v->meta_pte_bytes_frame[k] = v->meta_pte_bytes_frame_y + v->meta_pte_bytes_frame_c;
+ v->meta_row_byte[k] = v->meta_row_byte_y + v->meta_row_byte_c;
+ v->v_init_pre_fill_y[k] =dcn_bw_floor2((v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0, 1.0);
+ v->max_num_swath_y[k] =dcn_bw_ceil2((v->v_init_pre_fill_y[k] - 1.0) / v->swath_height_y[k], 1.0) + 1;
+ if (v->v_init_pre_fill_y[k] > 1.0) {
+ v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] - 2.0), v->swath_height_y[k]);
+ }
+ else {
+ v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] + v->swath_height_y[k] - 2.0), v->swath_height_y[k]);
+ }
+ v->max_partial_swath_y =dcn_bw_max2(1.0, v->max_partial_swath_y);
+ v->prefetch_source_lines_y[k] = v->max_num_swath_y[k] * v->swath_height_y[k] + v->max_partial_swath_y;
+ if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) {
+ v->v_init_pre_fill_c[k] =dcn_bw_floor2((v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0, 1.0);
+ v->max_num_swath_c[k] =dcn_bw_ceil2((v->v_init_pre_fill_c[k] - 1.0) / v->swath_height_c[k], 1.0) + 1;
+ if (v->v_init_pre_fill_c[k] > 1.0) {
+ v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] - 2.0), v->swath_height_c[k]);
+ }
+ else {
+ v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] + v->swath_height_c[k] - 2.0), v->swath_height_c[k]);
+ }
+ v->max_partial_swath_c =dcn_bw_max2(1.0, v->max_partial_swath_c);
+ }
+ else {
+ v->max_num_swath_c[k] = 0.0;
+ v->max_partial_swath_c = 0.0;
+ }
+ v->prefetch_source_lines_c[k] = v->max_num_swath_c[k] * v->swath_height_c[k] + v->max_partial_swath_c;
+ }
+ v->t_calc = 24.0 / v->dcf_clk_deep_sleep;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) {
+ v->max_vstartup_lines[k] = v->vtotal[k] - v->vactive[k] - 1.0;
+ }
+ else {
+ v->max_vstartup_lines[k] = v->v_sync_plus_back_porch[k] - 1.0;
+ }
+ }
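+ /*search over vstartup lines and prefetch mode for a supportable prefetch schedule*/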
+ v->next_prefetch_mode = 0.0;
+ do {
+ v->v_startup_lines = 13.0;
+ do {
+ v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_yes;
+ v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_no;
+ v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
+ v->v_ratio_prefetch_more_than4 = dcn_bw_no;
+ v->destination_line_times_for_prefetch_less_than2 = dcn_bw_no;
+ v->prefetch_mode = v->next_prefetch_mode;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->dstx_after_scaler = 90.0 * v->pixel_clock[k] / v->dppclk + 42.0 * v->pixel_clock[k] / v->dispclk;
+ if (v->dpp_per_plane[k] > 1.0) {
+ v->dstx_after_scaler = v->dstx_after_scaler + v->scaler_rec_out_width[k] / 2.0;
+ }
+ if (v->output_format[k] == dcn_bw_420) {
+ v->dsty_after_scaler = 1.0;
+ }
+ else {
+ v->dsty_after_scaler = 0.0;
+ }
+ v->v_update_offset_pix =dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0);
+ v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk);
+ v->v_update_width_pix = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k];
+ v->v_ready_offset_pix =dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k];
+ v->t_setup = (v->v_update_offset_pix + v->v_update_width_pix + v->v_ready_offset_pix) / v->pixel_clock[k];
+ v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]);
+ if (v->prefetch_mode == 0.0) {
+ v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency);
+ }
+ else if (v->prefetch_mode == 1.0) {
+ v->t_wait =dcn_bw_max2(v->sr_enter_plus_exit_time, v->urgent_latency);
+ }
+ else {
+ v->t_wait = v->urgent_latency;
+ }
+ v->destination_lines_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->v_startup[k] - v->t_wait / (v->htotal[k] / v->pixel_clock[k]) - (v->t_calc + v->t_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dsty_after_scaler + v->dstx_after_scaler / v->htotal[k]) + 0.125), 1.0) / 4;
+ if (v->destination_lines_for_prefetch[k] > 0.0) {
+ v->prefetch_bandwidth[k] = (v->meta_pte_bytes_frame[k] + 2.0 * v->meta_row_byte[k] + 2.0 * v->pixel_pte_bytes_per_row[k] + v->prefetch_source_lines_y[k] * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0)) / (v->destination_lines_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->prefetch_bandwidth[k] = 999999.0;
+ }
+ }
+ v->bandwidth_available_for_immediate_flip = v->return_bw;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->bandwidth_available_for_immediate_flip = v->bandwidth_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->prefetch_bandwidth[k]);
+ }
+ v->tot_immediate_flip_bytes = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->tot_immediate_flip_bytes = v->tot_immediate_flip_bytes + v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k];
+ }
+ }
+ v->max_rd_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) {
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->time_for_fetching_meta_pte =dcn_bw_max5(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->meta_pte_bytes_frame[k] * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ }
+ else {
+ v->time_for_fetching_meta_pte =dcn_bw_max3(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0);
+ }
+ }
+ else {
+ v->time_for_fetching_meta_pte = v->htotal[k] / v->pixel_clock[k] / 4.0;
+ }
+ v->destination_lines_to_request_vm_inv_blank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_meta_pte / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ if ((v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes)) {
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->time_for_fetching_row_in_vblank =dcn_bw_max5((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, 2.0 * v->urgent_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+ }
+ else {
+ v->time_for_fetching_row_in_vblank =dcn_bw_max3((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+ }
+ }
+ else {
+ v->time_for_fetching_row_in_vblank =dcn_bw_max2(v->urgent_extra_latency - v->time_for_fetching_meta_pte, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte);
+ }
+ v->destination_lines_to_request_row_in_vblank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_row_in_vblank / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4;
+ v->lines_to_request_prefetch_pixel_data = v->destination_lines_for_prefetch[k] - v->destination_lines_to_request_vm_inv_blank[k] - v->destination_lines_to_request_row_in_vblank[k];
+ if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+ v->v_ratio_prefetch_y[k] = v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data;
+ if ((v->swath_height_y[k] > 4.0)) {
+ if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_y[k] - 3.0) / 2.0) {
+ v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], v->max_num_swath_y[k] * v->swath_height_y[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_y[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_prefetch_y[k] = 999999.0;
+ }
+ }
+ }
+ else {
+ v->v_ratio_prefetch_y[k] = 999999.0;
+ }
+ v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], 1.0);
+ if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+ v->v_ratio_prefetch_c[k] = v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data;
+ if ((v->swath_height_c[k] > 4.0)) {
+ if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_c[k] - 3.0) / 2.0) {
+ v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], v->max_num_swath_c[k] * v->swath_height_c[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_c[k] - 3.0) / 2.0));
+ }
+ else {
+ v->v_ratio_prefetch_c[k] = 999999.0;
+ }
+ }
+ }
+ else {
+ v->v_ratio_prefetch_c[k] = 999999.0;
+ }
+ v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], 1.0);
+ if (v->lines_to_request_prefetch_pixel_data > 0.0) {
+ v->required_prefetch_pix_data_bw = v->dpp_per_plane[k] * (v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 2.0) * v->swath_width_y[k] / (v->htotal[k] / v->pixel_clock[k]);
+ }
+ else {
+ v->required_prefetch_pix_data_bw = 999999.0;
+ }
+ v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->required_prefetch_pix_data_bw);
+ if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) {
+ v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->meta_pte_bytes_frame[k] / (v->destination_lines_to_request_vm_inv_blank[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / (v->destination_lines_to_request_row_in_vblank[k] * v->htotal[k] / v->pixel_clock[k]));
+ }
+ if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
+ v->v_ratio_prefetch_more_than4 = dcn_bw_yes;
+ }
+ if (v->destination_lines_for_prefetch[k] < 2.0) {
+ v->destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
+ }
+ if (v->max_vstartup_lines[k] > v->v_startup_lines) {
+ if (v->required_prefetch_pix_data_bw > (v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k])) {
+ v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_no;
+ }
+ if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) {
+ v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_yes;
+ }
+ if (v->destination_lines_for_prefetch[k] < 2.0) {
+ v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_yes;
+ }
+ }
+ }
+ if (v->max_rd_bandwidth <= v->return_bw && v->v_ratio_prefetch_more_than4 == dcn_bw_no && v->destination_line_times_for_prefetch_less_than2 == dcn_bw_no) {
+ v->prefetch_mode_supported = dcn_bw_yes;
+ }
+ else {
+ v->prefetch_mode_supported = dcn_bw_no;
+ }
+ v->v_startup_lines = v->v_startup_lines + 1.0;
+ } while (!(v->prefetch_mode_supported == dcn_bw_yes || (v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw == dcn_bw_yes && v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 == dcn_bw_no && v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 == dcn_bw_no)));
+ v->next_prefetch_mode = v->next_prefetch_mode + 1.0;
+ } while (!(v->prefetch_mode_supported == dcn_bw_yes || v->prefetch_mode == 2.0));
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->v_ratio_prefetch_y[k] <= 1.0) {
+ v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+ }
+ if (v->byte_per_pixel_detc[k] == 0.0) {
+ v->display_pipe_line_delivery_time_chroma_prefetch[k] = 0.0;
+ }
+ else {
+ if (v->v_ratio_prefetch_c[k] <= 1.0) {
+ v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k];
+ }
+ else {
+ v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk;
+ }
+ }
+ }
+ /*min ttuv_blank*/
+
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->prefetch_mode == 0.0) {
+ v->allow_dram_clock_change_during_vblank[k] = dcn_bw_yes;
+ v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
+ v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max3(v->dram_clock_change_watermark, v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
+ }
+ else if (v->prefetch_mode == 1.0) {
+ v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
+ v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes;
+ v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max2(v->stutter_enter_plus_exit_watermark, v->urgent_watermark);
+ }
+ else {
+ v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no;
+ v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_no;
+ v->min_ttuv_blank[k] = v->t_calc + v->urgent_watermark;
+ }
+ }
+ /*nb p-state/dram clock change support*/
+
+ v->active_dp_ps = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->active_dp_ps = v->active_dp_ps + v->dpp_per_plane[k];
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ v->lb_latency_hiding_source_lines_y =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0);
+ v->lb_latency_hiding_source_lines_c =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0);
+ v->effective_lb_latency_hiding_y = v->lb_latency_hiding_source_lines_y / v->v_ratio[k] * (v->htotal[k] / v->pixel_clock[k]);
+ v->effective_lb_latency_hiding_c = v->lb_latency_hiding_source_lines_c / (v->v_ratio[k] / 2.0) * (v->htotal[k] / v->pixel_clock[k]);
+ if (v->swath_width_y[k] > 2.0 * v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_y = v->dpp_output_buffer_pixels / v->swath_width_y[k];
+ }
+ else if (v->swath_width_y[k] > v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_y = 0.5;
+ }
+ else {
+ v->dpp_output_buffer_lines_y = 1.0;
+ }
+ if (v->swath_width_y[k] / 2.0 > 2.0 * v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_c = v->dpp_output_buffer_pixels / (v->swath_width_y[k] / 2.0);
+ }
+ else if (v->swath_width_y[k] / 2.0 > v->dpp_output_buffer_pixels) {
+ v->dpp_output_buffer_lines_c = 0.5;
+ }
+ else {
+ v->dpp_output_buffer_lines_c = 1.0;
+ }
+ v->dppopp_buffering_y = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_y + v->opp_output_buffer_lines);
+ v->max_det_buffering_time_y = v->full_det_buffering_time_y[k] + (v->lines_in_dety[k] - v->lines_in_dety_rounded_down_to_swath[k]) / v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
+ v->active_dram_clock_change_latency_margin_y = v->dppopp_buffering_y + v->effective_lb_latency_hiding_y + v->max_det_buffering_time_y - v->dram_clock_change_watermark;
+ if (v->active_dp_ps > 1.0) {
+ v->active_dram_clock_change_latency_margin_y = v->active_dram_clock_change_latency_margin_y - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]);
+ }
+ if (v->byte_per_pixel_detc[k] > 0.0) {
+ v->dppopp_buffering_c = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_c + v->opp_output_buffer_lines);
+ v->max_det_buffering_time_c = v->full_det_buffering_time_c[k] + (v->lines_in_detc[k] - v->lines_in_detc_rounded_down_to_swath[k]) / v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
+ v->active_dram_clock_change_latency_margin_c = v->dppopp_buffering_c + v->effective_lb_latency_hiding_c + v->max_det_buffering_time_c - v->dram_clock_change_watermark;
+ if (v->active_dp_ps > 1.0) {
+ v->active_dram_clock_change_latency_margin_c = v->active_dram_clock_change_latency_margin_c - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]);
+ }
+ v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin_y, v->active_dram_clock_change_latency_margin_c);
+ }
+ else {
+ v->active_dram_clock_change_latency_margin[k] = v->active_dram_clock_change_latency_margin_y;
+ }
+ if (v->output_format[k] == dcn_bw_444) {
+ v->writeback_dram_clock_change_latency_margin = (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0) - v->writeback_dram_clock_change_watermark;
+ }
+ else {
+ v->writeback_dram_clock_change_latency_margin =dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k])) - v->writeback_dram_clock_change_watermark;
+ }
+ if (v->output[k] == dcn_bw_writeback) {
+ v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin[k], v->writeback_dram_clock_change_latency_margin);
+ }
+ }
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->allow_dram_clock_change_during_vblank[k] == dcn_bw_yes) {
+ v->v_blank_dram_clock_change_latency_margin[k] = (v->vtotal[k] - v->scaler_recout_height[k]) * (v->htotal[k] / v->pixel_clock[k]) -dcn_bw_max2(v->dram_clock_change_watermark, v->writeback_dram_clock_change_watermark);
+ }
+ else {
+ v->v_blank_dram_clock_change_latency_margin[k] = 0.0;
+ }
+ }
+ v->min_active_dram_clock_change_margin = 999999.0;
+ v->v_blank_of_min_active_dram_clock_change_margin = 999999.0;
+ v->second_min_active_dram_clock_change_margin = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->active_dram_clock_change_latency_margin[k] < v->min_active_dram_clock_change_margin) {
+ v->second_min_active_dram_clock_change_margin = v->min_active_dram_clock_change_margin;
+ v->min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
+ v->v_blank_of_min_active_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
+ }
+ else if (v->active_dram_clock_change_latency_margin[k] < v->second_min_active_dram_clock_change_margin) {
+ v->second_min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k];
+ }
+ }
+ v->min_vblank_dram_clock_change_margin = 999999.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->min_vblank_dram_clock_change_margin > v->v_blank_dram_clock_change_latency_margin[k]) {
+ v->min_vblank_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k];
+ }
+ }
+ if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) {
+ v->dram_clock_change_margin =dcn_bw_max2(v->min_active_dram_clock_change_margin, v->min_vblank_dram_clock_change_margin);
+ }
+ else if (v->v_blank_of_min_active_dram_clock_change_margin > v->min_active_dram_clock_change_margin) {
+ v->dram_clock_change_margin =dcn_bw_min2(v->second_min_active_dram_clock_change_margin, v->v_blank_of_min_active_dram_clock_change_margin);
+ }
+ else {
+ v->dram_clock_change_margin = v->min_active_dram_clock_change_margin;
+ }
+ if (v->min_active_dram_clock_change_margin > 0.0) {
+ v->dram_clock_change_support = dcn_bw_supported_in_v_active;
+ }
+ else if (v->dram_clock_change_margin > 0.0) {
+ v->dram_clock_change_support = dcn_bw_supported_in_v_blank;
+ }
+ else {
+ v->dram_clock_change_support = dcn_bw_not_supported;
+ }
+ /*maximum bandwidth used*/
+
+ v->wr_bandwidth = 0.0;
+ for (k = 0; k <= v->number_of_active_planes - 1; k++) {
+ if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) {
+ v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0;
+ }
+ else if (v->output[k] == dcn_bw_writeback) {
+ v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5;
+ }
+ }
+ v->max_used_bw = v->max_rd_bandwidth + v->wr_bandwidth;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
new file mode 100644
index 000000000000..03f06f682ead
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN_CALC_AUTO_H_
+#define _DCN_CALC_AUTO_H_
+
+#include "dcn_calcs.h"
+
+void scaler_settings_calculation(struct dcn_bw_internal_vars *v);
+void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v);
+void display_pipe_configuration(struct dcn_bw_internal_vars *v);
+void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(
+ struct dcn_bw_internal_vars *v);
+
+#endif /* _DCN_CALC_AUTO_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
new file mode 100644
index 000000000000..b6abe0f3bb15
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn_calc_math.h"
+
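+/* floating-point remainder of arg1 / arg2; a NaN argument yields the other operand */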
+float dcn_bw_mod(const float arg1, const float arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 - arg2 * ((int) (arg1 / arg2));
+}
+
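+/* NaN-safe minimum: a NaN argument yields the other operand */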
+float dcn_bw_min2(const float arg1, const float arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 < arg2 ? arg1 : arg2;
+}
+
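+/* maximum of two unsigned ints; the self-inequality NaN checks are no-ops for integers */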
+unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 > arg2 ? arg1 : arg2;
+}
+
+float dcn_bw_max2(const float arg1, const float arg2)
+{
+ if (arg1 != arg1)
+ return arg2;
+ if (arg2 != arg2)
+ return arg1;
+ return arg1 > arg2 ? arg1 : arg2;
+}
+
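+/* round arg down to the nearest multiple of significance (0 if significance is 0) */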
+float dcn_bw_floor2(const float arg, const float significance)
+{
+ if (significance == 0)
+ return 0;
+ return ((int) (arg / significance)) * significance;
+}
+
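+/* round arg up to the nearest multiple of significance; values within a small epsilon of a multiple are returned unchanged */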
+float dcn_bw_ceil2(const float arg, const float significance)
+{
+ float flr = dcn_bw_floor2(arg, significance);
+ if (significance == 0)
+ return 0;
+ return flr + 0.00001 >= arg ? arg : flr + significance;
+}
+
+float dcn_bw_max3(float v1, float v2, float v3)
+{
+ return v3 > dcn_bw_max2(v1, v2) ? v3 : dcn_bw_max2(v1, v2);
+}
+
+float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5)
+{
+ return dcn_bw_max3(v1, v2, v3) > dcn_bw_max2(v4, v5) ? dcn_bw_max3(v1, v2, v3) : dcn_bw_max2(v4, v5);
+}
+
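+/* exponentiation by squaring; exp is expected to be integral (see disabled ASSERT) */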
+float dcn_bw_pow(float a, float exp)
+{
+ float temp;
+ /*ASSERT(exp == (int)exp);*/
+ if ((int)exp == 0)
+ return 1;
+ temp = dcn_bw_pow(a, (int)(exp / 2));
+ if (((int)exp % 2) == 0) {
+ return temp * temp;
+ } else {
+ if ((int)exp > 0)
+ return a * temp * temp;
+ else
+ return (temp * temp) / a;
+ }
+}
+
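+/* fast approximate logarithm: take the exponent from the float's bit pattern, apply a quadratic correction to the mantissa, and divide by log2(b) for bases other than 2 */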
+float dcn_bw_log(float a, float b)
+{
+ int * const exp_ptr = (int *)(&a);
+ int x = *exp_ptr;
+ const int log_2 = ((x >> 23) & 255) - 128;
+ x &= ~(255 << 23);
+ x += 127 << 23;
+ *exp_ptr = x;
+
+ a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3;
+
+ if (b > 2.00001 || b < 1.99999)
+ return (a + log_2) / dcn_bw_log(b, 2);
+ else
+ return (a + log_2);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
new file mode 100644
index 000000000000..f46ab0e24ca1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN_CALC_MATH_H_
+#define _DCN_CALC_MATH_H_
+
+float dcn_bw_mod(const float arg1, const float arg2);
+float dcn_bw_min2(const float arg1, const float arg2);
+unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2);
+float dcn_bw_max2(const float arg1, const float arg2);
+float dcn_bw_floor2(const float arg, const float significance);
+float dcn_bw_ceil2(const float arg, const float significance);
+float dcn_bw_max3(float v1, float v2, float v3);
+float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5);
+float dcn_bw_pow(float a, float exp);
+float dcn_bw_log(float a, float b);
+
+#endif /* _DCN_CALC_MATH_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
new file mode 100644
index 000000000000..3dce35e66b09
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -0,0 +1,1626 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn_calcs.h"
+#include "dcn_calc_auto.h"
+#include "dc.h"
+#include "dal_asic_id.h"
+
+#include "resource.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn_calc_math.h"
+
+/* Defaults from spreadsheet rev#247 */
+const struct dcn_soc_bounding_box dcn10_soc_defaults = {
+ /* latencies */
+ .sr_exit_time = 17, /*us*/
+ .sr_enter_plus_exit_time = 19, /*us*/
+ .urgent_latency = 4, /*us*/
+ .dram_clock_change_latency = 17, /*us*/
+ .write_back_latency = 12, /*us*/
+ .percent_of_ideal_drambw_received_after_urg_latency = 80, /*%*/
+
+ /* below default clocks are derived from the STA target, based on the
+ * slow-slow corner + 10% margin, with voltages aligned to FCLK.
+ *
+ * Use these values if the fused values don't make sense, as earlier
+ * parts don't have correct values fused */
+ /* default DCF CLK DPM on RV*/
+ .dcfclkv_max0p9 = 655, /* MHz, = 3600/5.5 */
+ .dcfclkv_nom0p8 = 626, /* MHz, = 3600/5.75 */
+ .dcfclkv_mid0p72 = 600, /* MHz, = 3600/6, bypass */
+ .dcfclkv_min0p65 = 300, /* MHz, = 3600/12, bypass */
+
+ /* default DISP CLK voltage state on RV */
+ .max_dispclk_vmax0p9 = 1108, /* MHz, = 3600/3.25 */
+ .max_dispclk_vnom0p8 = 1029, /* MHz, = 3600/3.5 */
+ .max_dispclk_vmid0p72 = 960, /* MHz, = 3600/3.75 */
+ .max_dispclk_vmin0p65 = 626, /* MHz, = 3600/5.75 */
+
+ /* default DPP CLK voltage state on RV */
+ .max_dppclk_vmax0p9 = 720, /* MHz, = 3600/5 */
+ .max_dppclk_vnom0p8 = 686, /* MHz, = 3600/5.25 */
+ .max_dppclk_vmid0p72 = 626, /* MHz, = 3600/5.75 */
+ .max_dppclk_vmin0p65 = 400, /* MHz, = 3600/9 */
+
+ /* default PHY CLK voltage state on RV */
+ .phyclkv_max0p9 = 900, /*MHz*/
+ .phyclkv_nom0p8 = 847, /*MHz*/
+ .phyclkv_mid0p72 = 800, /*MHz*/
+ .phyclkv_min0p65 = 600, /*MHz*/
+
+ /* BW depend on FCLK, MCLK, # of channels */
+ /* dual channel BW */
+ .fabric_and_dram_bandwidth_vmax0p9 = 38.4f, /*GB/s*/
+ .fabric_and_dram_bandwidth_vnom0p8 = 34.133f, /*GB/s*/
+ .fabric_and_dram_bandwidth_vmid0p72 = 29.866f, /*GB/s*/
+ .fabric_and_dram_bandwidth_vmin0p65 = 12.8f, /*GB/s*/
+ /* single channel BW
+ .fabric_and_dram_bandwidth_vmax0p9 = 19.2f,
+ .fabric_and_dram_bandwidth_vnom0p8 = 17.066f,
+ .fabric_and_dram_bandwidth_vmid0p72 = 14.933f,
+ .fabric_and_dram_bandwidth_vmin0p65 = 12.8f,
+ */
+
+ .number_of_channels = 2,
+
+ .socclk = 208, /*MHz*/
+ .downspreading = 0.5f, /*%*/
+ .round_trip_ping_latency_cycles = 128, /*DCFCLK Cycles*/
+ .urgent_out_of_order_return_per_channel = 256, /*bytes*/
+ .vmm_page_size = 4096, /*bytes*/
+ .return_bus_width = 64, /*bytes*/
+ .max_request_size = 256, /*bytes*/
+
+ /* Depends on user class (client vs embedded, workstation, etc) */
+ .percent_disp_bw_limit = 0.3f /*%*/
+};
+
+const struct dcn_ip_params dcn10_ip_defaults = {
+ .rob_buffer_size_in_kbyte = 64,
+ .det_buffer_size_in_kbyte = 164,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_in_kbyte = 8,
+ .pte_enable = dcn_bw_yes,
+ .pte_chunk_size = 2, /*kbytes*/
+ .meta_chunk_size = 2, /*kbytes*/
+ .writeback_chunk_size = 2, /*kbytes*/
+ .odm_capability = dcn_bw_no,
+ .dsc_capability = dcn_bw_no,
+ .line_buffer_size = 589824, /*bit*/
+ .max_line_buffer_lines = 12,
+ .is_line_buffer_bpp_fixed = dcn_bw_no,
+ .line_buffer_fixed_bpp = dcn_bw_na,
+ .writeback_luma_buffer_size = 12, /*kbytes*/
+ .writeback_chroma_buffer_size = 8, /*kbytes*/
+ .max_num_dpp = 4,
+ .max_num_writeback = 2,
+ .max_dchub_topscl_throughput = 4, /*pixels/dppclk*/
+ .max_pscl_tolb_throughput = 2, /*pixels/dppclk*/
+ .max_lb_tovscl_throughput = 4, /*pixels/dppclk*/
+ .max_vscl_tohscl_throughput = 4, /*pixels/dppclk*/
+ .max_hscl_ratio = 4,
+ .max_vscl_ratio = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .pte_buffer_size_in_requests = 42,
+ .dispclk_ramping_margin = 1, /*%*/
+ .under_scan_factor = 1.11f,
+ .max_inter_dcn_tile_repeaters = 8,
+ .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = dcn_bw_no,
+ .bug_forcing_luma_and_chroma_request_to_same_size_fixed = dcn_bw_no,
+ .dcfclk_cstate_latency = 10 /*TODO clone of something else? sr_enter_plus_exit_time?*/
+};
+
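+/* translate a dc swizzle mode into the dcn_bw_defs value used by the bandwidth formulas */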
+static enum dcn_bw_defs tl_sw_mode_to_bw_defs(enum swizzle_mode_values sw_mode)
+{
+ switch (sw_mode) {
+ case DC_SW_LINEAR:
+ return dcn_bw_sw_linear;
+ case DC_SW_4KB_S:
+ return dcn_bw_sw_4_kb_s;
+ case DC_SW_4KB_D:
+ return dcn_bw_sw_4_kb_d;
+ case DC_SW_64KB_S:
+ return dcn_bw_sw_64_kb_s;
+ case DC_SW_64KB_D:
+ return dcn_bw_sw_64_kb_d;
+ case DC_SW_VAR_S:
+ return dcn_bw_sw_var_s;
+ case DC_SW_VAR_D:
+ return dcn_bw_sw_var_d;
+ case DC_SW_64KB_S_T:
+ return dcn_bw_sw_64_kb_s_t;
+ case DC_SW_64KB_D_T:
+ return dcn_bw_sw_64_kb_d_t;
+ case DC_SW_4KB_S_X:
+ return dcn_bw_sw_4_kb_s_x;
+ case DC_SW_4KB_D_X:
+ return dcn_bw_sw_4_kb_d_x;
+ case DC_SW_64KB_S_X:
+ return dcn_bw_sw_64_kb_s_x;
+ case DC_SW_64KB_D_X:
+ return dcn_bw_sw_64_kb_d_x;
+ case DC_SW_VAR_S_X:
+ return dcn_bw_sw_var_s_x;
+ case DC_SW_VAR_D_X:
+ return dcn_bw_sw_var_d_x;
+ case DC_SW_256B_S:
+ case DC_SW_256_D:
+ case DC_SW_256_R:
+ case DC_SW_4KB_R:
+ case DC_SW_64KB_R:
+ case DC_SW_VAR_R:
+ case DC_SW_4KB_R_X:
+ case DC_SW_64KB_R_X:
+ case DC_SW_VAR_R_X:
+ default:
+ BREAK_TO_DEBUGGER(); /*not in formula*/
+ return dcn_bw_sw_4_kb_s;
+ }
+}
+
+static int tl_lb_bpp_to_int(enum lb_pixel_depth depth)
+{
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ return 18;
+ case LB_PIXEL_DEPTH_24BPP:
+ return 24;
+ case LB_PIXEL_DEPTH_30BPP:
+ return 30;
+ case LB_PIXEL_DEPTH_36BPP:
+ return 36;
+ default:
+ return 30;
+ }
+}
+
+static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format format)
+{
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ return dcn_bw_rgb_sub_16;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ return dcn_bw_rgb_sub_32;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ return dcn_bw_rgb_sub_64;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ return dcn_bw_yuv420_sub_8;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ return dcn_bw_yuv420_sub_10;
+ default:
+ return dcn_bw_rgb_sub_32;
+ }
+}
+
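+/* populate a DML end-to-end pipe parameter struct from a pipe_ctx */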
+static void pipe_ctx_to_e2e_pipe_params (
+ const struct pipe_ctx *pipe,
+ struct _vcs_dpi_display_pipe_params_st *input)
+{
+ input->src.is_hsplit = false;
+ if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state)
+ input->src.is_hsplit = true;
+ else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state)
+ input->src.is_hsplit = true;
+
+ input->src.dcc = pipe->plane_state->dcc.enable;
+ input->src.dcc_rate = 1;
+ input->src.meta_pitch = pipe->plane_state->dcc.grph.meta_pitch;
+ input->src.source_scan = dm_horz;
+ input->src.sw_mode = pipe->plane_state->tiling_info.gfx9.swizzle;
+
+ input->src.viewport_width = pipe->plane_res.scl_data.viewport.width;
+ input->src.viewport_height = pipe->plane_res.scl_data.viewport.height;
+ input->src.data_pitch = pipe->plane_res.scl_data.viewport.width;
+ input->src.data_pitch_c = pipe->plane_res.scl_data.viewport.width;
+ input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not currently stored */
+ input->src.cur0_bpp = 32;
+
+ switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
+ /* for 4/8/16 high tiles */
+ case DC_SW_LINEAR:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_4k_tile;
+ break;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ input->src.is_display_sw = 0;
+ input->src.macro_tile_size = dm_4k_tile;
+ break;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_X:
+ case DC_SW_64KB_S_T:
+ input->src.is_display_sw = 0;
+ input->src.macro_tile_size = dm_64k_tile;
+ break;
+ case DC_SW_VAR_S:
+ case DC_SW_VAR_S_X:
+ input->src.is_display_sw = 0;
+ input->src.macro_tile_size = dm_256k_tile;
+ break;
+
+ /* For 64bpp 2 high tiles */
+ case DC_SW_4KB_D:
+ case DC_SW_4KB_D_X:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_4k_tile;
+ break;
+ case DC_SW_64KB_D:
+ case DC_SW_64KB_D_X:
+ case DC_SW_64KB_D_T:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_64k_tile;
+ break;
+ case DC_SW_VAR_D:
+ case DC_SW_VAR_D_X:
+ input->src.is_display_sw = 1;
+ input->src.macro_tile_size = dm_256k_tile;
+ break;
+
+ /* Unsupported swizzle modes for dcn */
+ case DC_SW_256B_S:
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+
+ switch (pipe->plane_state->rotation) {
+ case ROTATION_ANGLE_0:
+ case ROTATION_ANGLE_180:
+ input->src.source_scan = dm_horz;
+ break;
+ case ROTATION_ANGLE_90:
+ case ROTATION_ANGLE_270:
+ input->src.source_scan = dm_vert;
+ break;
+ default:
+ ASSERT(0); /* Not supported */
+ break;
+ }
+
+ /* TODO: Fix pixel format mappings */
+ switch (pipe->plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ input->src.source_format = dm_420_8;
+ input->src.viewport_width_c = input->src.viewport_width / 2;
+ input->src.viewport_height_c = input->src.viewport_height / 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ input->src.source_format = dm_420_10;
+ input->src.viewport_width_c = input->src.viewport_width / 2;
+ input->src.viewport_height_c = input->src.viewport_height / 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ input->src.source_format = dm_444_64;
+ input->src.viewport_width_c = input->src.viewport_width;
+ input->src.viewport_height_c = input->src.viewport_height;
+ break;
+ default:
+ input->src.source_format = dm_444_32;
+ input->src.viewport_width_c = input->src.viewport_width;
+ input->src.viewport_height_c = input->src.viewport_height;
+ break;
+ }
+
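+ /*
+ * The scl_data ratios and inits are 31.32 fixed point; dividing the raw
+ * .value by 2^32 (4294967296.0) converts them to the floats DML expects.
+ */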
+ input->scale_taps.htaps = pipe->plane_res.scl_data.taps.h_taps;
+ input->scale_ratio_depth.hscl_ratio = pipe->plane_res.scl_data.ratios.horz.value/4294967296.0;
+ input->scale_ratio_depth.vscl_ratio = pipe->plane_res.scl_data.ratios.vert.value/4294967296.0;
+ input->scale_ratio_depth.vinit = pipe->plane_res.scl_data.inits.v.value/4294967296.0;
+ if (input->scale_ratio_depth.vinit < 1.0)
+ input->scale_ratio_depth.vinit = 1;
+ input->scale_taps.vtaps = pipe->plane_res.scl_data.taps.v_taps;
+ input->scale_taps.vtaps_c = pipe->plane_res.scl_data.taps.v_taps_c;
+ input->scale_taps.htaps_c = pipe->plane_res.scl_data.taps.h_taps_c;
+ input->scale_ratio_depth.hscl_ratio_c = pipe->plane_res.scl_data.ratios.horz_c.value/4294967296.0;
+ input->scale_ratio_depth.vscl_ratio_c = pipe->plane_res.scl_data.ratios.vert_c.value/4294967296.0;
+ input->scale_ratio_depth.vinit_c = pipe->plane_res.scl_data.inits.v_c.value/4294967296.0;
+ if (input->scale_ratio_depth.vinit_c < 1.0)
+ input->scale_ratio_depth.vinit_c = 1;
+ switch (pipe->plane_res.scl_data.lb_params.depth) {
+ case LB_PIXEL_DEPTH_30BPP:
+ input->scale_ratio_depth.lb_depth = 30; break;
+ case LB_PIXEL_DEPTH_36BPP:
+ input->scale_ratio_depth.lb_depth = 36; break;
+ default:
+ input->scale_ratio_depth.lb_depth = 24; break;
+ }
+
+
+ input->dest.vactive = pipe->stream->timing.v_addressable + pipe->stream->timing.v_border_top
+ + pipe->stream->timing.v_border_bottom;
+
+ input->dest.recout_width = pipe->plane_res.scl_data.recout.width;
+ input->dest.recout_height = pipe->plane_res.scl_data.recout.height;
+
+ input->dest.full_recout_width = pipe->plane_res.scl_data.recout.width;
+ input->dest.full_recout_height = pipe->plane_res.scl_data.recout.height;
+
+ input->dest.htotal = pipe->stream->timing.h_total;
+ input->dest.hblank_start = input->dest.htotal - pipe->stream->timing.h_front_porch;
+ input->dest.hblank_end = input->dest.hblank_start
+ - pipe->stream->timing.h_addressable
+ - pipe->stream->timing.h_border_left
+ - pipe->stream->timing.h_border_right;
+
+ input->dest.vtotal = pipe->stream->timing.v_total;
+ input->dest.vblank_start = input->dest.vtotal - pipe->stream->timing.v_front_porch;
+ input->dest.vblank_end = input->dest.vblank_start
+ - pipe->stream->timing.v_addressable
+ - pipe->stream->timing.v_border_bottom
+ - pipe->stream->timing.v_border_top;
+ input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_khz/1000.0;
+ input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start;
+ input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset;
+ input->dest.vupdate_width = pipe->pipe_dlg_param.vupdate_width;
+
+}
+
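+/*
+ * Build the DML system and e2e pipe parameters for this pipe, then compute
+ * its RQ, DLG and TTU register values via the dml1 helpers.
+ */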
+static void dcn_bw_calc_rq_dlg_ttu(
+ const struct dc *dc,
+ const struct dcn_bw_internal_vars *v,
+ struct pipe_ctx *pipe,
+ int in_idx)
+{
+ struct display_mode_lib *dml = (struct display_mode_lib *)(&dc->dml);
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
+ struct _vcs_dpi_display_rq_params_st rq_param = {0};
+ struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0};
+ struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } };
+ float total_active_bw = 0;
+ float total_prefetch_bw = 0;
+ int total_flip_bytes = 0;
+ int i;
+
+ for (i = 0; i < number_of_planes; i++) {
+ total_active_bw += v->read_bandwidth[i];
+ total_prefetch_bw += v->prefetch_bandwidth[i];
+ total_flip_bytes += v->total_immediate_flip_bytes[i];
+ }
+ dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
+ if (dlg_sys_param.total_flip_bw < 0.0)
+ dlg_sys_param.total_flip_bw = 0;
+
+ dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark;
+ dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
+ dlg_sys_param.t_urg_wm_us = v->urgent_watermark;
+ dlg_sys_param.t_extra_us = v->urgent_extra_latency;
+ dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
+ dlg_sys_param.total_flip_bytes = total_flip_bytes;
+
+ pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe);
+ input.clks_cfg.dcfclk_mhz = v->dcfclk;
+ input.clks_cfg.dispclk_mhz = v->dispclk;
+ input.clks_cfg.dppclk_mhz = v->dppclk;
+ input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+ input.clks_cfg.socclk_mhz = v->socclk;
+ input.clks_cfg.voltage = v->voltage_level;
+// dc->dml.logger = pool->base.logger;
+ input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
+ input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
+ //input[in_idx].dout.output_standard;
+ switch (v->output_deep_color[in_idx]) {
+ case dcn_bw_encoder_12bpc:
+ input.dout.output_bpc = dm_out_12;
+ break;
+ case dcn_bw_encoder_10bpc:
+ input.dout.output_bpc = dm_out_10;
+ break;
+ case dcn_bw_encoder_8bpc:
+ default:
+ input.dout.output_bpc = dm_out_8;
+ break;
+ }
+
+ /*todo: soc->sr_enter_plus_exit_time??*/
+ dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
+
+ dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src);
+ dml1_extract_rq_regs(dml, rq_regs, rq_param);
+ dml1_rq_dlg_get_dlg_params(
+ dml,
+ dlg_regs,
+ ttu_regs,
+ rq_param.dlg,
+ dlg_sys_param,
+ input,
+ true,
+ true,
+ v->pte_enable == dcn_bw_yes,
+ pipe->plane_state->flip_immediate);
+}
+
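+/*
+ * Copy the primary pipe's state into an idle secondary pipe, give the
+ * secondary its own HW resources (mi/hubp/ipp/xfm/dpp) and link it below
+ * the primary so the plane is rendered across two pipes.
+ */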
+static void split_stream_across_pipes(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct pipe_ctx *primary_pipe,
+ struct pipe_ctx *secondary_pipe)
+{
+ int pipe_idx = secondary_pipe->pipe_idx;
+
+ if (!primary_pipe->plane_state)
+ return;
+
+ *secondary_pipe = *primary_pipe;
+
+ secondary_pipe->pipe_idx = pipe_idx;
+ secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx];
+ secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx];
+ if (primary_pipe->bottom_pipe) {
+ ASSERT(primary_pipe->bottom_pipe != secondary_pipe);
+ secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe;
+ secondary_pipe->bottom_pipe->top_pipe = secondary_pipe;
+ }
+ primary_pipe->bottom_pipe = secondary_pipe;
+ secondary_pipe->top_pipe = primary_pipe;
+
+ resource_build_scaling_params(primary_pipe);
+ resource_build_scaling_params(secondary_pipe);
+}
+
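+/*
+ * Fill the four watermark sets: B and C are recalculated with the vnom0p8
+ * bandwidth (C also with the vnom0p8 dcfclk), D with the vmax0p9 values,
+ * and A last, at the state actually required by the current config.
+ */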
+static void calc_wm_sets_and_perf_params(
+ struct dc_state *context,
+ struct dcn_bw_internal_vars *v)
+{
+ /* Calculate set A last to keep internal var state consistent for required config */
+ if (v->voltage_level < 2) {
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
+
+ v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
+ v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
+ v->dcfclk = v->dcfclkv_nom0p8;
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
+ }
+
+ if (v->voltage_level < 3) {
+ v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->dcfclk_per_state[2] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[1] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[0] = v->dcfclkv_max0p9;
+ v->dcfclk = v->dcfclkv_max0p9;
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
+ }
+
+ v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
+ v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level];
+ v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
+ v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
+ v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
+ v->dcfclk = v->dcfclk_per_state[v->voltage_level];
+ dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
+
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ v->stutter_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ v->stutter_enter_plus_exit_watermark * 1000;
+ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ v->dram_clock_change_watermark * 1000;
+ context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ if (v->voltage_level >= 2) {
+ context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
+ context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ }
+ if (v->voltage_level >= 3)
+ context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+}
+
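+/*
+ * Apply any debug/registry overrides for the SoC latency and bandwidth
+ * parameters; returns true if something changed so the caller can re-sync
+ * the calcs and DML copies.
+ */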
+static bool dcn_bw_apply_registry_override(struct dc *dc)
+{
+ bool updated = false;
+
+ kernel_fpu_begin();
+ if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns
+ && dc->debug.sr_exit_time_ns) {
+ updated = true;
+ dc->dcn_soc->sr_exit_time = dc->debug.sr_exit_time_ns / 1000.0;
+ }
+
+ if ((int)(dc->dcn_soc->sr_enter_plus_exit_time * 1000)
+ != dc->debug.sr_enter_plus_exit_time_ns
+ && dc->debug.sr_enter_plus_exit_time_ns) {
+ updated = true;
+ dc->dcn_soc->sr_enter_plus_exit_time =
+ dc->debug.sr_enter_plus_exit_time_ns / 1000.0;
+ }
+
+ if ((int)(dc->dcn_soc->urgent_latency * 1000) != dc->debug.urgent_latency_ns
+ && dc->debug.urgent_latency_ns) {
+ updated = true;
+ dc->dcn_soc->urgent_latency = dc->debug.urgent_latency_ns / 1000.0;
+ }
+
+ if ((int)(dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency * 1000)
+ != dc->debug.percent_of_ideal_drambw
+ && dc->debug.percent_of_ideal_drambw) {
+ updated = true;
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency =
+ dc->debug.percent_of_ideal_drambw;
+ }
+
+ if ((int)(dc->dcn_soc->dram_clock_change_latency * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ updated = true;
+ dc->dcn_soc->dram_clock_change_latency =
+ dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
+ kernel_fpu_end();
+
+ return updated;
+}
+
+void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v)
+{
+ /*
+ * disable optional pipe split by lowering the dispclk bounding box
+ * at DPM0
+ */
+ v->max_dispclk[0] = v->max_dppclk_vmin0p65;
+}
+
+void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
+ unsigned int pixel_rate_khz)
+{
+ float pixel_rate_mhz = pixel_rate_khz / 1000;
+
+ /*
+ * force pipe split by lowering the DPM0 dpp clock to just below
+ * the specified pixel_rate, so the bw calc will split the pipe.
+ */
+ if (pixel_rate_mhz < v->max_dppclk[0])
+ v->max_dppclk[0] = pixel_rate_mhz;
+}
+
+void hack_bounding_box(struct dcn_bw_internal_vars *v,
+ struct dc_debug *dbg,
+ struct dc_state *context)
+{
+ if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) {
+ hack_disable_optional_pipe_split(v);
+ }
+
+ if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP &&
+ context->stream_count >= 2) {
+ hack_disable_optional_pipe_split(v);
+ }
+
+ if (context->stream_count == 1 &&
+ dbg->force_single_disp_pipe_split) {
+ struct dc_stream_state *stream0 = context->streams[0];
+
+ hack_force_pipe_split(v, stream0->timing.pix_clk_khz);
+ }
+}
+
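+/*
+ * Top-level DCN bandwidth validation: copy the SoC/IP bounding box into the
+ * internal vars, describe every active stream/plane, run mode support and
+ * system configuration, derive clocks and watermarks, split or merge pipes
+ * as needed, compute per-pipe RQ/DLG/TTU values and finally check the
+ * display bandwidth limit.
+ */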
+bool dcn_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ const struct resource_pool *pool = dc->res_pool;
+ struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
+ int i, input_idx;
+ int vesa_sync_start, asic_blank_end, asic_blank_start;
+ bool bw_limit_pass;
+ float bw_limit;
+
+ PERFORMANCE_TRACE_START();
+ if (dcn_bw_apply_registry_override(dc))
+ dcn_bw_sync_calcs_and_dml(dc);
+
+ memset(v, 0, sizeof(*v));
+ kernel_fpu_begin();
+ v->sr_exit_time = dc->dcn_soc->sr_exit_time;
+ v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time;
+ v->urgent_latency = dc->dcn_soc->urgent_latency;
+ v->write_back_latency = dc->dcn_soc->write_back_latency;
+ v->percent_of_ideal_drambw_received_after_urg_latency =
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
+
+ v->dcfclkv_min0p65 = dc->dcn_soc->dcfclkv_min0p65;
+ v->dcfclkv_mid0p72 = dc->dcn_soc->dcfclkv_mid0p72;
+ v->dcfclkv_nom0p8 = dc->dcn_soc->dcfclkv_nom0p8;
+ v->dcfclkv_max0p9 = dc->dcn_soc->dcfclkv_max0p9;
+
+ v->max_dispclk_vmin0p65 = dc->dcn_soc->max_dispclk_vmin0p65;
+ v->max_dispclk_vmid0p72 = dc->dcn_soc->max_dispclk_vmid0p72;
+ v->max_dispclk_vnom0p8 = dc->dcn_soc->max_dispclk_vnom0p8;
+ v->max_dispclk_vmax0p9 = dc->dcn_soc->max_dispclk_vmax0p9;
+
+ v->max_dppclk_vmin0p65 = dc->dcn_soc->max_dppclk_vmin0p65;
+ v->max_dppclk_vmid0p72 = dc->dcn_soc->max_dppclk_vmid0p72;
+ v->max_dppclk_vnom0p8 = dc->dcn_soc->max_dppclk_vnom0p8;
+ v->max_dppclk_vmax0p9 = dc->dcn_soc->max_dppclk_vmax0p9;
+
+ v->socclk = dc->dcn_soc->socclk;
+
+ v->fabric_and_dram_bandwidth_vmin0p65 = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
+ v->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
+ v->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
+
+ v->phyclkv_min0p65 = dc->dcn_soc->phyclkv_min0p65;
+ v->phyclkv_mid0p72 = dc->dcn_soc->phyclkv_mid0p72;
+ v->phyclkv_nom0p8 = dc->dcn_soc->phyclkv_nom0p8;
+ v->phyclkv_max0p9 = dc->dcn_soc->phyclkv_max0p9;
+
+ v->downspreading = dc->dcn_soc->downspreading;
+ v->round_trip_ping_latency_cycles = dc->dcn_soc->round_trip_ping_latency_cycles;
+ v->urgent_out_of_order_return_per_channel = dc->dcn_soc->urgent_out_of_order_return_per_channel;
+ v->number_of_channels = dc->dcn_soc->number_of_channels;
+ v->vmm_page_size = dc->dcn_soc->vmm_page_size;
+ v->dram_clock_change_latency = dc->dcn_soc->dram_clock_change_latency;
+ v->return_bus_width = dc->dcn_soc->return_bus_width;
+
+ v->rob_buffer_size_in_kbyte = dc->dcn_ip->rob_buffer_size_in_kbyte;
+ v->det_buffer_size_in_kbyte = dc->dcn_ip->det_buffer_size_in_kbyte;
+ v->dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
+ v->opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
+ v->pixel_chunk_size_in_kbyte = dc->dcn_ip->pixel_chunk_size_in_kbyte;
+ v->pte_enable = dc->dcn_ip->pte_enable;
+ v->pte_chunk_size = dc->dcn_ip->pte_chunk_size;
+ v->meta_chunk_size = dc->dcn_ip->meta_chunk_size;
+ v->writeback_chunk_size = dc->dcn_ip->writeback_chunk_size;
+ v->odm_capability = dc->dcn_ip->odm_capability;
+ v->dsc_capability = dc->dcn_ip->dsc_capability;
+ v->line_buffer_size = dc->dcn_ip->line_buffer_size;
+ v->is_line_buffer_bpp_fixed = dc->dcn_ip->is_line_buffer_bpp_fixed;
+ v->line_buffer_fixed_bpp = dc->dcn_ip->line_buffer_fixed_bpp;
+ v->max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
+ v->writeback_luma_buffer_size = dc->dcn_ip->writeback_luma_buffer_size;
+ v->writeback_chroma_buffer_size = dc->dcn_ip->writeback_chroma_buffer_size;
+ v->max_num_dpp = dc->dcn_ip->max_num_dpp;
+ v->max_num_writeback = dc->dcn_ip->max_num_writeback;
+ v->max_dchub_topscl_throughput = dc->dcn_ip->max_dchub_topscl_throughput;
+ v->max_pscl_tolb_throughput = dc->dcn_ip->max_pscl_tolb_throughput;
+ v->max_lb_tovscl_throughput = dc->dcn_ip->max_lb_tovscl_throughput;
+ v->max_vscl_tohscl_throughput = dc->dcn_ip->max_vscl_tohscl_throughput;
+ v->max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
+ v->max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
+ v->max_hscl_taps = dc->dcn_ip->max_hscl_taps;
+ v->max_vscl_taps = dc->dcn_ip->max_vscl_taps;
+ v->under_scan_factor = dc->dcn_ip->under_scan_factor;
+ v->pte_buffer_size_in_requests = dc->dcn_ip->pte_buffer_size_in_requests;
+ v->dispclk_ramping_margin = dc->dcn_ip->dispclk_ramping_margin;
+ v->max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
+ v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
+ dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ v->bug_forcing_luma_and_chroma_request_to_same_size_fixed =
+ dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+
+ v->voltage[5] = dcn_bw_no_support;
+ v->voltage[4] = dcn_bw_v_max0p9;
+ v->voltage[3] = dcn_bw_v_max0p9;
+ v->voltage[2] = dcn_bw_v_nom0p8;
+ v->voltage[1] = dcn_bw_v_mid0p72;
+ v->voltage[0] = dcn_bw_v_min0p65;
+ v->fabric_and_dram_bandwidth_per_state[5] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[4] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[3] = v->fabric_and_dram_bandwidth_vmax0p9;
+ v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
+ v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72;
+ v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65;
+ v->dcfclk_per_state[5] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[4] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[3] = v->dcfclkv_max0p9;
+ v->dcfclk_per_state[2] = v->dcfclkv_nom0p8;
+ v->dcfclk_per_state[1] = v->dcfclkv_mid0p72;
+ v->dcfclk_per_state[0] = v->dcfclkv_min0p65;
+ v->max_dispclk[5] = v->max_dispclk_vmax0p9;
+ v->max_dispclk[4] = v->max_dispclk_vmax0p9;
+ v->max_dispclk[3] = v->max_dispclk_vmax0p9;
+ v->max_dispclk[2] = v->max_dispclk_vnom0p8;
+ v->max_dispclk[1] = v->max_dispclk_vmid0p72;
+ v->max_dispclk[0] = v->max_dispclk_vmin0p65;
+ v->max_dppclk[5] = v->max_dppclk_vmax0p9;
+ v->max_dppclk[4] = v->max_dppclk_vmax0p9;
+ v->max_dppclk[3] = v->max_dppclk_vmax0p9;
+ v->max_dppclk[2] = v->max_dppclk_vnom0p8;
+ v->max_dppclk[1] = v->max_dppclk_vmid0p72;
+ v->max_dppclk[0] = v->max_dppclk_vmin0p65;
+ v->phyclk_per_state[5] = v->phyclkv_max0p9;
+ v->phyclk_per_state[4] = v->phyclkv_max0p9;
+ v->phyclk_per_state[3] = v->phyclkv_max0p9;
+ v->phyclk_per_state[2] = v->phyclkv_nom0p8;
+ v->phyclk_per_state[1] = v->phyclkv_mid0p72;
+ v->phyclk_per_state[0] = v->phyclkv_min0p65;
+
+ hack_bounding_box(v, &dc->debug, context);
+
+ if (v->voltage_override == dcn_bw_v_max0p9) {
+ v->voltage_override_level = number_of_states - 1;
+ } else if (v->voltage_override == dcn_bw_v_nom0p8) {
+ v->voltage_override_level = number_of_states - 2;
+ } else if (v->voltage_override == dcn_bw_v_mid0p72) {
+ v->voltage_override_level = number_of_states - 3;
+ } else {
+ v->voltage_override_level = 0;
+ }
+ v->synchronized_vblank = dcn_bw_no;
+ v->ta_pscalculation = dcn_bw_override;
+ v->allow_different_hratio_vratio = dcn_bw_yes;
+
+
+ for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+ /* skip all but first of split pipes */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ continue;
+
+ v->underscan_output[input_idx] = false; /* taken care of in recout already */
+ v->interlace_output[input_idx] = false;
+
+ v->htotal[input_idx] = pipe->stream->timing.h_total;
+ v->vtotal[input_idx] = pipe->stream->timing.v_total;
+ v->vactive[input_idx] = pipe->stream->timing.v_addressable +
+ pipe->stream->timing.v_border_top + pipe->stream->timing.v_border_bottom;
+ v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total
+ - v->vactive[input_idx]
+ - pipe->stream->timing.v_front_porch;
+ v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_khz / 1000.0f;
+
+ if (!pipe->plane_state) {
+ v->dcc_enable[input_idx] = dcn_bw_yes;
+ v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32;
+ v->source_surface_mode[input_idx] = dcn_bw_sw_4_kb_s;
+ v->lb_bit_per_pixel[input_idx] = 30;
+ v->viewport_width[input_idx] = pipe->stream->timing.h_addressable;
+ v->viewport_height[input_idx] = pipe->stream->timing.v_addressable;
+ v->scaler_rec_out_width[input_idx] = pipe->stream->timing.h_addressable;
+ v->scaler_recout_height[input_idx] = pipe->stream->timing.v_addressable;
+ v->override_hta_ps[input_idx] = 1;
+ v->override_vta_ps[input_idx] = 1;
+ v->override_hta_pschroma[input_idx] = 1;
+ v->override_vta_pschroma[input_idx] = 1;
+ v->source_scan[input_idx] = dcn_bw_hor;
+
+ } else {
+ v->viewport_height[input_idx] = pipe->plane_res.scl_data.viewport.height;
+ v->viewport_width[input_idx] = pipe->plane_res.scl_data.viewport.width;
+ v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width;
+ v->scaler_recout_height[input_idx] = pipe->plane_res.scl_data.recout.height;
+ if (pipe->bottom_pipe && pipe->bottom_pipe->plane_state == pipe->plane_state) {
+ if (pipe->plane_state->rotation % 2 == 0) {
+ int viewport_end = pipe->plane_res.scl_data.viewport.width
+ + pipe->plane_res.scl_data.viewport.x;
+ int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.width
+ + pipe->bottom_pipe->plane_res.scl_data.viewport.x;
+
+ if (viewport_end > viewport_b_end)
+ v->viewport_width[input_idx] = viewport_end
+ - pipe->bottom_pipe->plane_res.scl_data.viewport.x;
+ else
+ v->viewport_width[input_idx] = viewport_b_end
+ - pipe->plane_res.scl_data.viewport.x;
+ } else {
+ int viewport_end = pipe->plane_res.scl_data.viewport.height
+ + pipe->plane_res.scl_data.viewport.y;
+ int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.height
+ + pipe->bottom_pipe->plane_res.scl_data.viewport.y;
+
+ if (viewport_end > viewport_b_end)
+ v->viewport_height[input_idx] = viewport_end
+ - pipe->bottom_pipe->plane_res.scl_data.viewport.y;
+ else
+ v->viewport_height[input_idx] = viewport_b_end
+ - pipe->plane_res.scl_data.viewport.y;
+ }
+ v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width
+ + pipe->bottom_pipe->plane_res.scl_data.recout.width;
+ }
+
+ v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no;
+ v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs(
+ pipe->plane_state->format);
+ v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs(
+ pipe->plane_state->tiling_info.gfx9.swizzle);
+ v->lb_bit_per_pixel[input_idx] = tl_lb_bpp_to_int(pipe->plane_res.scl_data.lb_params.depth);
+ v->override_hta_ps[input_idx] = pipe->plane_res.scl_data.taps.h_taps;
+ v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
+ v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
+ v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+ v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
+ }
+ if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
+ v->lb_bit_per_pixel[input_idx] = v->line_buffer_fixed_bpp;
+ v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/
+ v->output_format[input_idx] = pipe->stream->timing.pixel_encoding ==
+ PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444;
+ v->output[input_idx] = pipe->stream->sink->sink_signal ==
+ SIGNAL_TYPE_HDMI_TYPE_A ? dcn_bw_hdmi : dcn_bw_dp;
+ v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc;
+ if (v->output[input_idx] == dcn_bw_hdmi) {
+ switch (pipe->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_101010:
+ v->output_deep_color[input_idx] = dcn_bw_encoder_10bpc;
+ break;
+ case COLOR_DEPTH_121212:
+ v->output_deep_color[input_idx] = dcn_bw_encoder_12bpc;
+ break;
+ case COLOR_DEPTH_161616:
+ v->output_deep_color[input_idx] = dcn_bw_encoder_16bpc;
+ break;
+ default:
+ break;
+ }
+ }
+
+ input_idx++;
+ }
+ v->number_of_active_planes = input_idx;
+
+ scaler_settings_calculation(v);
+ mode_support_and_system_configuration(v);
+
+ if (v->voltage_level == 0 &&
+ (dc->debug.sr_exit_time_dpm0_ns
+ || dc->debug.sr_enter_plus_exit_time_dpm0_ns)) {
+
+ if (dc->debug.sr_enter_plus_exit_time_dpm0_ns)
+ v->sr_enter_plus_exit_time =
+ dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
+ if (dc->debug.sr_exit_time_dpm0_ns)
+ v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
+ dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
+ dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
+ mode_support_and_system_configuration(v);
+ }
+
+ if (v->voltage_level != 5) {
+ float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
+ if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
+ bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
+ else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
+ bw_consumed = v->fabric_and_dram_bandwidth_vmid0p72;
+ else if (bw_consumed < v->fabric_and_dram_bandwidth_vnom0p8)
+ bw_consumed = v->fabric_and_dram_bandwidth_vnom0p8;
+ else
+ bw_consumed = v->fabric_and_dram_bandwidth_vmax0p9;
+
+ if (bw_consumed < v->fabric_and_dram_bandwidth)
+ if (dc->debug.voltage_align_fclk)
+ bw_consumed = v->fabric_and_dram_bandwidth;
+
+ display_pipe_configuration(v);
+ calc_wm_sets_and_perf_params(context, v);
+ context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ (ddr4_dram_factor_single_Channel * v->number_of_channels));
+ if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
+ context->bw.dcn.calc_clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ }
+
+ context->bw.dcn.calc_clk.dram_ccm_us = (int)(v->dram_clock_change_margin);
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us = (int)(v->min_active_dram_clock_change_margin);
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw.dcn.calc_clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+
+ context->bw.dcn.calc_clk.dispclk_khz = (int)(v->dispclk * 1000);
+ if (dc->debug.max_disp_clk == true)
+ context->bw.dcn.calc_clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+
+ if (context->bw.dcn.calc_clk.dispclk_khz <
+ dc->debug.min_disp_clk_khz) {
+ context->bw.dcn.calc_clk.dispclk_khz =
+ dc->debug.min_disp_clk_khz;
+ }
+
+ context->bw.dcn.calc_clk.dppclk_div = (int)(v->dispclk_dppclk_ratio) == 2;
+
+ for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /* skip inactive pipe */
+ if (!pipe->stream)
+ continue;
+ /* skip all but first of split pipes */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ continue;
+
+ pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
+ pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
+ pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+ pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+ pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
+ vesa_sync_start = pipe->stream->timing.v_addressable +
+ pipe->stream->timing.v_border_bottom +
+ pipe->stream->timing.v_front_porch;
+
+ asic_blank_end = (pipe->stream->timing.v_total -
+ vesa_sync_start -
+ pipe->stream->timing.v_border_top)
+ * (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
+
+ asic_blank_start = asic_blank_end +
+ (pipe->stream->timing.v_border_top +
+ pipe->stream->timing.v_addressable +
+ pipe->stream->timing.v_border_bottom)
+ * (pipe->stream->timing.flags.INTERLACE ? 1 : 0);
+
+ pipe->pipe_dlg_param.vblank_start = asic_blank_start;
+ pipe->pipe_dlg_param.vblank_end = asic_blank_end;
+
+ if (pipe->plane_state) {
+ struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe;
+
+ if (v->dpp_per_plane[input_idx] == 2 ||
+ ((pipe->stream->view_format ==
+ VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe->stream->view_format ==
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
+ (pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
+ pipe->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE))) {
+ if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+ /* update previously split pipe */
+ hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx];
+ hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx];
+ hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx];
+ hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx];
+
+ hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total;
+ hsplit_pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total;
+ hsplit_pipe->pipe_dlg_param.vblank_start = pipe->pipe_dlg_param.vblank_start;
+ hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
+ } else {
+ /* pipe not split previously needs split */
+ hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool);
+ ASSERT(hsplit_pipe);
+ split_stream_across_pipes(
+ &context->res_ctx, pool,
+ pipe, hsplit_pipe);
+ }
+
+ dcn_bw_calc_rq_dlg_ttu(dc, v, hsplit_pipe, input_idx);
+ } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
+ /* merge previously split pipe */
+ pipe->bottom_pipe = hsplit_pipe->bottom_pipe;
+ if (hsplit_pipe->bottom_pipe)
+ hsplit_pipe->bottom_pipe->top_pipe = pipe;
+ hsplit_pipe->plane_state = NULL;
+ hsplit_pipe->stream = NULL;
+ hsplit_pipe->top_pipe = NULL;
+ hsplit_pipe->bottom_pipe = NULL;
+ resource_build_scaling_params(pipe);
+ }
+ /* for now it is important to do this after pipe split, for building e2e params */
+ dcn_bw_calc_rq_dlg_ttu(dc, v, pipe, input_idx);
+ }
+
+ input_idx++;
+ }
+ }
+
+ if (v->voltage_level == 0) {
+
+ dc->dml.soc.sr_enter_plus_exit_time_us =
+ dc->dcn_soc->sr_enter_plus_exit_time;
+ dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ }
+
+ /*
+ * BW limit is set to prevent display from impacting other system functions
+ */
+
+ bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9;
+ bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit;
+
+ kernel_fpu_end();
+
+ PERFORMANCE_TRACE_END();
+
+ if (bw_limit_pass && v->voltage_level != 5)
+ return true;
+ else
+ return false;
+}
+
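+/*
+ * Return the lowest voltage level whose bounding-box limit can sustain the
+ * requested clock; dcn_bw_v_max0p91 indicates the request exceeds even the
+ * max0p9 limit.
+ */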
+static unsigned int dcn_find_normalized_clock_vdd_Level(
+ const struct dc *dc,
+ enum dm_pp_clock_type clocks_type,
+ int clocks_in_khz)
+{
+ int vdd_level = dcn_bw_v_min0p65;
+
+ if (clocks_in_khz == 0)/* TODO: some clocks are not yet taken into consideration */
+ return vdd_level;
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+
+ case DM_PP_CLOCK_TYPE_DPPCLK:
+ if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+
+ case DM_PP_CLOCK_TYPE_MEMORY_CLK:
+ {
+ unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+ if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ }
+ break;
+
+ case DM_PP_CLOCK_TYPE_DCFCLK:
+ if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) {
+ vdd_level = dcn_bw_v_max0p91;
+ BREAK_TO_DEBUGGER();
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) {
+ vdd_level = dcn_bw_v_max0p9;
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) {
+ vdd_level = dcn_bw_v_nom0p8;
+ } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) {
+ vdd_level = dcn_bw_v_mid0p72;
+ } else
+ vdd_level = dcn_bw_v_min0p65;
+ break;
+
+ default:
+ break;
+ }
+ return vdd_level;
+}
+
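+/*
+ * Find the highest voltage level required by any of the given clocks and
+ * return the dcfclk for that level.
+ */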
+unsigned int dcn_find_dcfclk_suits_all(
+ const struct dc *dc,
+ struct clocks_value *clocks)
+{
+ unsigned vdd_level, vdd_level_temp;
+ unsigned dcf_clk;
+
+ /* find a common supported voltage level */
+ vdd_level = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_in_khz);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_in_khz);
+
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_in_khz);
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->dcfclock_in_khz);
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ vdd_level_temp = dcn_find_normalized_clock_vdd_Level(
+ dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclock_in_khz);
+
+ /* find the dcfclk corresponding to that level */
+ vdd_level = dcn_bw_max(vdd_level, vdd_level_temp);
+ if (vdd_level == dcn_bw_v_max0p91) {
+ BREAK_TO_DEBUGGER();
+ dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
+ } else if (vdd_level == dcn_bw_v_max0p9)
+ dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000;
+ else if (vdd_level == dcn_bw_v_nom0p8)
+ dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000;
+ else if (vdd_level == dcn_bw_v_mid0p72)
+ dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000;
+ else
+ dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000;
+
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\tdcf_clk for voltage = %d\n", dcf_clk);
+ return dcf_clk;
+}
+
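+/*
+ * Refresh the SoC bounding box from PPLib: FCLK levels are converted to
+ * per-voltage fabric/DRAM bandwidth (using the DDR4 single-channel factor)
+ * and DCFCLK levels are copied in directly.
+ */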
+void dcn_bw_update_from_pplib(struct dc *dc)
+{
+ struct dc_context *ctx = dc->ctx;
+ struct dm_pp_clock_levels_with_voltage clks = {0};
+
+ kernel_fpu_begin();
+
+ /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
+
+ if (dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_FCLK, &clks) &&
+ clks.num_levels != 0) {
+ ASSERT(clks.num_levels >= 3);
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (clks.data[0].clocks_in_khz / 1000.0) / 1000.0;
+ if (clks.num_levels > 2) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ } else {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ }
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels *
+ (clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0) * ddr4_dram_factor_single_Channel / 1000.0;
+ } else
+ BREAK_TO_DEBUGGER();
+ if (dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DCFCLK, &clks) &&
+ clks.num_levels >= 3) {
+ dc->dcn_soc->dcfclkv_min0p65 = clks.data[0].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_mid0p72 = clks.data[clks.num_levels - 3].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_nom0p8 = clks.data[clks.num_levels - 2].clocks_in_khz / 1000.0;
+ dc->dcn_soc->dcfclkv_max0p9 = clks.data[clks.num_levels - 1].clocks_in_khz / 1000.0;
+ } else
+ BREAK_TO_DEBUGGER();
+
+ kernel_fpu_end();
+}
+
+void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
+{
+ struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
+ struct pp_smu_wm_range_sets ranges = {0};
+ int max_fclk_khz, nom_fclk_khz, mid_fclk_khz, min_fclk_khz;
+ int max_dcfclk_khz, min_dcfclk_khz;
+ int socclk_khz;
+ const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
+ unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels);
+
+ if (!pp->set_wm_ranges)
+ return;
+
+ kernel_fpu_begin();
+ max_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000000 / factor;
+ nom_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000000 / factor;
+ mid_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000000 / factor;
+ min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32;
+ max_dcfclk_khz = dc->dcn_soc->dcfclkv_max0p9 * 1000;
+ min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000;
+ socclk_khz = dc->dcn_soc->socclk * 1000;
+ kernel_fpu_end();
+
+ /* Now notify PPLib/SMU which watermark sets it should select depending
+ * on the DPM state it is in, and update the BW MGR GFX engine and
+ * memory clock member variables used in the watermark calculations
+ * for each watermark set.
+ */
+ /* SOCCLK does not affect anything but writeback for DCN, so for now we
+ * don't care what the value is, hence min to overdrive level
+ */
+ ranges.num_reader_wm_sets = WM_COUNT;
+ ranges.num_writer_wm_sets = WM_COUNT;
+ ranges.reader_wm_sets[0].wm_inst = WM_A;
+ ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = min_fclk_khz;
+ ranges.writer_wm_sets[0].wm_inst = WM_A;
+ ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = min_fclk_khz;
+
+ ranges.reader_wm_sets[1].wm_inst = WM_B;
+ ranges.reader_wm_sets[1].min_drain_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[1].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[1].min_fill_clk_khz = mid_fclk_khz;
+ ranges.reader_wm_sets[1].max_fill_clk_khz = mid_fclk_khz;
+ ranges.writer_wm_sets[1].wm_inst = WM_B;
+ ranges.writer_wm_sets[1].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[1].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[1].min_drain_clk_khz = mid_fclk_khz;
+ ranges.writer_wm_sets[1].max_drain_clk_khz = mid_fclk_khz;
+
+
+ ranges.reader_wm_sets[2].wm_inst = WM_C;
+ ranges.reader_wm_sets[2].min_drain_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[2].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[2].min_fill_clk_khz = nom_fclk_khz;
+ ranges.reader_wm_sets[2].max_fill_clk_khz = nom_fclk_khz;
+ ranges.writer_wm_sets[2].wm_inst = WM_C;
+ ranges.writer_wm_sets[2].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[2].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[2].min_drain_clk_khz = nom_fclk_khz;
+ ranges.writer_wm_sets[2].max_drain_clk_khz = nom_fclk_khz;
+
+ ranges.reader_wm_sets[3].wm_inst = WM_D;
+ ranges.reader_wm_sets[3].min_drain_clk_khz = min_fclk_khz;
+ ranges.reader_wm_sets[3].max_drain_clk_khz = max_dcfclk_khz;
+ ranges.reader_wm_sets[3].min_fill_clk_khz = max_fclk_khz;
+ ranges.reader_wm_sets[3].max_fill_clk_khz = max_fclk_khz;
+ ranges.writer_wm_sets[3].wm_inst = WM_D;
+ ranges.writer_wm_sets[3].min_fill_clk_khz = socclk_khz;
+ ranges.writer_wm_sets[3].max_fill_clk_khz = overdrive;
+ ranges.writer_wm_sets[3].min_drain_clk_khz = max_fclk_khz;
+ ranges.writer_wm_sets[3].max_drain_clk_khz = max_fclk_khz;
+
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
+ ranges.reader_wm_sets[0].wm_inst = WM_A;
+ ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[0].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
+ ranges.reader_wm_sets[0].max_fill_clk_khz = 800000;
+ ranges.writer_wm_sets[0].wm_inst = WM_A;
+ ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[0].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
+ ranges.writer_wm_sets[0].max_drain_clk_khz = 800000;
+
+ ranges.reader_wm_sets[1].wm_inst = WM_B;
+ ranges.reader_wm_sets[1].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[1].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[1].min_fill_clk_khz = 933000;
+ ranges.reader_wm_sets[1].max_fill_clk_khz = 933000;
+ ranges.writer_wm_sets[1].wm_inst = WM_B;
+ ranges.writer_wm_sets[1].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[1].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[1].min_drain_clk_khz = 933000;
+ ranges.writer_wm_sets[1].max_drain_clk_khz = 933000;
+
+
+ ranges.reader_wm_sets[2].wm_inst = WM_C;
+ ranges.reader_wm_sets[2].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[2].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[2].min_fill_clk_khz = 1067000;
+ ranges.reader_wm_sets[2].max_fill_clk_khz = 1067000;
+ ranges.writer_wm_sets[2].wm_inst = WM_C;
+ ranges.writer_wm_sets[2].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[2].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[2].min_drain_clk_khz = 1067000;
+ ranges.writer_wm_sets[2].max_drain_clk_khz = 1067000;
+
+ ranges.reader_wm_sets[3].wm_inst = WM_D;
+ ranges.reader_wm_sets[3].min_drain_clk_khz = 300000;
+ ranges.reader_wm_sets[3].max_drain_clk_khz = 654000;
+ ranges.reader_wm_sets[3].min_fill_clk_khz = 1200000;
+ ranges.reader_wm_sets[3].max_fill_clk_khz = 1200000;
+ ranges.writer_wm_sets[3].wm_inst = WM_D;
+ ranges.writer_wm_sets[3].min_fill_clk_khz = 200000;
+ ranges.writer_wm_sets[3].max_fill_clk_khz = 757000;
+ ranges.writer_wm_sets[3].min_drain_clk_khz = 1200000;
+ ranges.writer_wm_sets[3].max_drain_clk_khz = 1200000;
+ }
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ pp->set_wm_ranges(&pp->pp_smu, &ranges);
+}
+
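+/*
+ * Log the SoC/IP bounding box and mirror it into dc->dml so the bandwidth
+ * calcs and DML operate on the same parameters.
+ */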
+void dcn_bw_sync_calcs_and_dml(struct dc *dc)
+{
+ kernel_fpu_begin();
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "sr_exit_time: %d ns\n"
+ "sr_enter_plus_exit_time: %d ns\n"
+ "urgent_latency: %d ns\n"
+ "write_back_latency: %d ns\n"
+ "percent_of_ideal_drambw_received_after_urg_latency: %d %\n"
+ "max_request_size: %d bytes\n"
+ "dcfclkv_max0p9: %d kHz\n"
+ "dcfclkv_nom0p8: %d kHz\n"
+ "dcfclkv_mid0p72: %d kHz\n"
+ "dcfclkv_min0p65: %d kHz\n"
+ "max_dispclk_vmax0p9: %d kHz\n"
+ "max_dispclk_vnom0p8: %d kHz\n"
+ "max_dispclk_vmid0p72: %d kHz\n"
+ "max_dispclk_vmin0p65: %d kHz\n"
+ "max_dppclk_vmax0p9: %d kHz\n"
+ "max_dppclk_vnom0p8: %d kHz\n"
+ "max_dppclk_vmid0p72: %d kHz\n"
+ "max_dppclk_vmin0p65: %d kHz\n"
+ "socclk: %d kHz\n"
+ "fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n"
+ "fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n"
+ "fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n"
+ "fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n"
+ "phyclkv_max0p9: %d kHz\n"
+ "phyclkv_nom0p8: %d kHz\n"
+ "phyclkv_mid0p72: %d kHz\n"
+ "phyclkv_min0p65: %d kHz\n"
+ "downspreading: %d %\n"
+ "round_trip_ping_latency_cycles: %d DCFCLK Cycles\n"
+ "urgent_out_of_order_return_per_channel: %d Bytes\n"
+ "number_of_channels: %d\n"
+ "vmm_page_size: %d Bytes\n"
+ "dram_clock_change_latency: %d ns\n"
+ "return_bus_width: %d Bytes\n",
+ dc->dcn_soc->sr_exit_time * 1000,
+ dc->dcn_soc->sr_enter_plus_exit_time * 1000,
+ dc->dcn_soc->urgent_latency * 1000,
+ dc->dcn_soc->write_back_latency * 1000,
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency,
+ dc->dcn_soc->max_request_size,
+ dc->dcn_soc->dcfclkv_max0p9 * 1000,
+ dc->dcn_soc->dcfclkv_nom0p8 * 1000,
+ dc->dcn_soc->dcfclkv_mid0p72 * 1000,
+ dc->dcn_soc->dcfclkv_min0p65 * 1000,
+ dc->dcn_soc->max_dispclk_vmax0p9 * 1000,
+ dc->dcn_soc->max_dispclk_vnom0p8 * 1000,
+ dc->dcn_soc->max_dispclk_vmid0p72 * 1000,
+ dc->dcn_soc->max_dispclk_vmin0p65 * 1000,
+ dc->dcn_soc->max_dppclk_vmax0p9 * 1000,
+ dc->dcn_soc->max_dppclk_vnom0p8 * 1000,
+ dc->dcn_soc->max_dppclk_vmid0p72 * 1000,
+ dc->dcn_soc->max_dppclk_vmin0p65 * 1000,
+ dc->dcn_soc->socclk * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000,
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000,
+ dc->dcn_soc->phyclkv_max0p9 * 1000,
+ dc->dcn_soc->phyclkv_nom0p8 * 1000,
+ dc->dcn_soc->phyclkv_mid0p72 * 1000,
+ dc->dcn_soc->phyclkv_min0p65 * 1000,
+ dc->dcn_soc->downspreading * 100,
+ dc->dcn_soc->round_trip_ping_latency_cycles,
+ dc->dcn_soc->urgent_out_of_order_return_per_channel,
+ dc->dcn_soc->number_of_channels,
+ dc->dcn_soc->vmm_page_size,
+ dc->dcn_soc->dram_clock_change_latency * 1000,
+ dc->dcn_soc->return_bus_width);
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "rob_buffer_size_in_kbyte: %d\n"
+ "det_buffer_size_in_kbyte: %d\n"
+ "dpp_output_buffer_pixels: %d\n"
+ "opp_output_buffer_lines: %d\n"
+ "pixel_chunk_size_in_kbyte: %d\n"
+ "pte_enable: %d\n"
+ "pte_chunk_size: %d kbytes\n"
+ "meta_chunk_size: %d kbytes\n"
+ "writeback_chunk_size: %d kbytes\n"
+ "odm_capability: %d\n"
+ "dsc_capability: %d\n"
+ "line_buffer_size: %d bits\n"
+ "max_line_buffer_lines: %d\n"
+ "is_line_buffer_bpp_fixed: %d\n"
+ "line_buffer_fixed_bpp: %d\n"
+ "writeback_luma_buffer_size: %d kbytes\n"
+ "writeback_chroma_buffer_size: %d kbytes\n"
+ "max_num_dpp: %d\n"
+ "max_num_writeback: %d\n"
+ "max_dchub_topscl_throughput: %d pixels/dppclk\n"
+ "max_pscl_tolb_throughput: %d pixels/dppclk\n"
+ "max_lb_tovscl_throughput: %d pixels/dppclk\n"
+ "max_vscl_tohscl_throughput: %d pixels/dppclk\n"
+ "max_hscl_ratio: %d\n"
+ "max_vscl_ratio: %d\n"
+ "max_hscl_taps: %d\n"
+ "max_vscl_taps: %d\n"
+ "pte_buffer_size_in_requests: %d\n"
+ "dispclk_ramping_margin: %d %\n"
+ "under_scan_factor: %d %\n"
+ "max_inter_dcn_tile_repeaters: %d\n"
+ "can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n"
+ "bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n"
+ "dcfclk_cstate_latency: %d\n",
+ dc->dcn_ip->rob_buffer_size_in_kbyte,
+ dc->dcn_ip->det_buffer_size_in_kbyte,
+ dc->dcn_ip->dpp_output_buffer_pixels,
+ dc->dcn_ip->opp_output_buffer_lines,
+ dc->dcn_ip->pixel_chunk_size_in_kbyte,
+ dc->dcn_ip->pte_enable,
+ dc->dcn_ip->pte_chunk_size,
+ dc->dcn_ip->meta_chunk_size,
+ dc->dcn_ip->writeback_chunk_size,
+ dc->dcn_ip->odm_capability,
+ dc->dcn_ip->dsc_capability,
+ dc->dcn_ip->line_buffer_size,
+ dc->dcn_ip->max_line_buffer_lines,
+ dc->dcn_ip->is_line_buffer_bpp_fixed,
+ dc->dcn_ip->line_buffer_fixed_bpp,
+ dc->dcn_ip->writeback_luma_buffer_size,
+ dc->dcn_ip->writeback_chroma_buffer_size,
+ dc->dcn_ip->max_num_dpp,
+ dc->dcn_ip->max_num_writeback,
+ dc->dcn_ip->max_dchub_topscl_throughput,
+ dc->dcn_ip->max_pscl_tolb_throughput,
+ dc->dcn_ip->max_lb_tovscl_throughput,
+ dc->dcn_ip->max_vscl_tohscl_throughput,
+ dc->dcn_ip->max_hscl_ratio,
+ dc->dcn_ip->max_vscl_ratio,
+ dc->dcn_ip->max_hscl_taps,
+ dc->dcn_ip->max_vscl_taps,
+ dc->dcn_ip->pte_buffer_size_in_requests,
+ dc->dcn_ip->dispclk_ramping_margin,
+ dc->dcn_ip->under_scan_factor * 100,
+ dc->dcn_ip->max_inter_dcn_tile_repeaters,
+ dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one,
+ dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed,
+ dc->dcn_ip->dcfclk_cstate_latency);
+ dc->dml.soc.vmin.socclk_mhz = dc->dcn_soc->socclk;
+ dc->dml.soc.vmid.socclk_mhz = dc->dcn_soc->socclk;
+ dc->dml.soc.vnom.socclk_mhz = dc->dcn_soc->socclk;
+ dc->dml.soc.vmax.socclk_mhz = dc->dcn_soc->socclk;
+
+ dc->dml.soc.vmin.dcfclk_mhz = dc->dcn_soc->dcfclkv_min0p65;
+ dc->dml.soc.vmid.dcfclk_mhz = dc->dcn_soc->dcfclkv_mid0p72;
+ dc->dml.soc.vnom.dcfclk_mhz = dc->dcn_soc->dcfclkv_nom0p8;
+ dc->dml.soc.vmax.dcfclk_mhz = dc->dcn_soc->dcfclkv_max0p9;
+
+ dc->dml.soc.vmin.dispclk_mhz = dc->dcn_soc->max_dispclk_vmin0p65;
+ dc->dml.soc.vmid.dispclk_mhz = dc->dcn_soc->max_dispclk_vmid0p72;
+ dc->dml.soc.vnom.dispclk_mhz = dc->dcn_soc->max_dispclk_vnom0p8;
+ dc->dml.soc.vmax.dispclk_mhz = dc->dcn_soc->max_dispclk_vmax0p9;
+
+ dc->dml.soc.vmin.dppclk_mhz = dc->dcn_soc->max_dppclk_vmin0p65;
+ dc->dml.soc.vmid.dppclk_mhz = dc->dcn_soc->max_dppclk_vmid0p72;
+ dc->dml.soc.vnom.dppclk_mhz = dc->dcn_soc->max_dppclk_vnom0p8;
+ dc->dml.soc.vmax.dppclk_mhz = dc->dcn_soc->max_dppclk_vmax0p9;
+
+ dc->dml.soc.vmin.phyclk_mhz = dc->dcn_soc->phyclkv_min0p65;
+ dc->dml.soc.vmid.phyclk_mhz = dc->dcn_soc->phyclkv_mid0p72;
+ dc->dml.soc.vnom.phyclk_mhz = dc->dcn_soc->phyclkv_nom0p8;
+ dc->dml.soc.vmax.phyclk_mhz = dc->dcn_soc->phyclkv_max0p9;
+
+ dc->dml.soc.vmin.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65;
+ dc->dml.soc.vmid.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72;
+ dc->dml.soc.vnom.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8;
+ dc->dml.soc.vmax.dram_bw_per_chan_gbps = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9;
+
+ dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time;
+ dc->dml.soc.urgent_latency_us = dc->dcn_soc->urgent_latency;
+ dc->dml.soc.writeback_latency_us = dc->dcn_soc->write_back_latency;
+ dc->dml.soc.ideal_dram_bw_after_urgent_percent =
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency;
+ dc->dml.soc.max_request_size_bytes = dc->dcn_soc->max_request_size;
+ dc->dml.soc.downspread_percent = dc->dcn_soc->downspreading;
+ dc->dml.soc.round_trip_ping_latency_dcfclk_cycles =
+ dc->dcn_soc->round_trip_ping_latency_cycles;
+ dc->dml.soc.urgent_out_of_order_return_per_channel_bytes =
+ dc->dcn_soc->urgent_out_of_order_return_per_channel;
+ dc->dml.soc.num_chans = dc->dcn_soc->number_of_channels;
+ dc->dml.soc.vmm_page_size_bytes = dc->dcn_soc->vmm_page_size;
+ dc->dml.soc.dram_clock_change_latency_us = dc->dcn_soc->dram_clock_change_latency;
+ dc->dml.soc.return_bus_width_bytes = dc->dcn_soc->return_bus_width;
+
+ dc->dml.ip.rob_buffer_size_kbytes = dc->dcn_ip->rob_buffer_size_in_kbyte;
+ dc->dml.ip.det_buffer_size_kbytes = dc->dcn_ip->det_buffer_size_in_kbyte;
+ dc->dml.ip.dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels;
+ dc->dml.ip.opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines;
+ dc->dml.ip.pixel_chunk_size_kbytes = dc->dcn_ip->pixel_chunk_size_in_kbyte;
+ dc->dml.ip.pte_enable = dc->dcn_ip->pte_enable == dcn_bw_yes;
+ dc->dml.ip.pte_chunk_size_kbytes = dc->dcn_ip->pte_chunk_size;
+ dc->dml.ip.meta_chunk_size_kbytes = dc->dcn_ip->meta_chunk_size;
+ dc->dml.ip.writeback_chunk_size_kbytes = dc->dcn_ip->writeback_chunk_size;
+ dc->dml.ip.line_buffer_size_bits = dc->dcn_ip->line_buffer_size;
+ dc->dml.ip.max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines;
+ dc->dml.ip.IsLineBufferBppFixed = dc->dcn_ip->is_line_buffer_bpp_fixed == dcn_bw_yes;
+ dc->dml.ip.LineBufferFixedBpp = dc->dcn_ip->line_buffer_fixed_bpp;
+ dc->dml.ip.writeback_luma_buffer_size_kbytes = dc->dcn_ip->writeback_luma_buffer_size;
+ dc->dml.ip.writeback_chroma_buffer_size_kbytes = dc->dcn_ip->writeback_chroma_buffer_size;
+ dc->dml.ip.max_num_dpp = dc->dcn_ip->max_num_dpp;
+ dc->dml.ip.max_num_wb = dc->dcn_ip->max_num_writeback;
+ dc->dml.ip.max_dchub_pscl_bw_pix_per_clk = dc->dcn_ip->max_dchub_topscl_throughput;
+ dc->dml.ip.max_pscl_lb_bw_pix_per_clk = dc->dcn_ip->max_pscl_tolb_throughput;
+ dc->dml.ip.max_lb_vscl_bw_pix_per_clk = dc->dcn_ip->max_lb_tovscl_throughput;
+ dc->dml.ip.max_vscl_hscl_bw_pix_per_clk = dc->dcn_ip->max_vscl_tohscl_throughput;
+ dc->dml.ip.max_hscl_ratio = dc->dcn_ip->max_hscl_ratio;
+ dc->dml.ip.max_vscl_ratio = dc->dcn_ip->max_vscl_ratio;
+ dc->dml.ip.max_hscl_taps = dc->dcn_ip->max_hscl_taps;
+ dc->dml.ip.max_vscl_taps = dc->dcn_ip->max_vscl_taps;
+ /*pte_buffer_size_in_requests missing in dml*/
+ dc->dml.ip.dispclk_ramp_margin_percent = dc->dcn_ip->dispclk_ramping_margin;
+ dc->dml.ip.underscan_factor = dc->dcn_ip->under_scan_factor;
+ dc->dml.ip.max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters;
+ dc->dml.ip.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one =
+ dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes;
+ dc->dml.ip.bug_forcing_LC_req_same_size_fixed =
+ dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes;
+ dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency;
+ kernel_fpu_end();
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
new file mode 100644
index 000000000000..7240db2e6f09
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -0,0 +1,1684 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#include "clock_source.h"
+#include "dc_bios_types.h"
+
+#include "bios_parser_interface.h"
+#include "include/irq_service_interface.h"
+#include "transform.h"
+#include "dpp.h"
+#include "timing_generator.h"
+#include "virtual/virtual_link_encoder.h"
+
+#include "link_hwss.h"
+#include "link_encoder.h"
+
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "mem_input.h"
+#include "hubp.h"
+
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destroy_links(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (NULL != dc->links[i])
+ link_destroy(&dc->links[i]);
+ }
+}
+
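+/*
+ * create_links() instantiates one dc_link per physical connector reported by
+ * the BIOS object table, then appends the requested number of virtual links,
+ * each backed by a virtual link encoder.  dc->link_count tracks the total.
+ */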
+static bool create_links(
+ struct dc *dc,
+ uint32_t num_virtual_links)
+{
+ int i;
+ int connectors_num;
+ struct dc_bios *bios = dc->ctx->dc_bios;
+
+ dc->link_count = 0;
+
+ connectors_num = bios->funcs->get_connectors_number(bios);
+
+ if (connectors_num > ENUM_ID_COUNT) {
+ dm_error(
+ "DC: Number of connectors %d exceeds maximum of %d!\n",
+ connectors_num,
+ ENUM_ID_COUNT);
+ return false;
+ }
+
+ if (connectors_num == 0 && num_virtual_links == 0) {
+ dm_error("DC: Number of connectors is zero!\n");
+ }
+
+ dm_output_to_console(
+ "DC: %s: connectors_num: physical:%d, virtual:%d\n",
+ __func__,
+ connectors_num,
+ num_virtual_links);
+
+ for (i = 0; i < connectors_num; i++) {
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
+ link_init_params.ctx = dc->ctx;
+ /* next BIOS object table connector */
+ link_init_params.connector_index = i;
+ link_init_params.link_index = dc->link_count;
+ link_init_params.dc = dc;
+ link = link_create(&link_init_params);
+
+ if (link) {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
+ }
+
+ for (i = 0; i < num_virtual_links; i++) {
+ struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
+ struct encoder_init_data enc_init = {0};
+
+ if (link == NULL) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_index = dc->link_count;
+ dc->links[dc->link_count] = link;
+ dc->link_count++;
+
+ link->ctx = dc->ctx;
+ link->dc = dc;
+ link->connector_signal = SIGNAL_TYPE_VIRTUAL;
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_VIRTUAL;
+ link->link_id.enum_id = ENUM_ID_1;
+ link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+
+ if (!link->link_enc) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ enc_init.ctx = dc->ctx;
+ enc_init.channel = CHANNEL_ID_UNKNOWN;
+ enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
+ enc_init.transmitter = TRANSMITTER_UNKNOWN;
+ enc_init.connector = link->link_id;
+ enc_init.encoder.type = OBJECT_TYPE_ENCODER;
+ enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
+ enc_init.encoder.enum_id = ENUM_ID_1;
+ virtual_link_encoder_construct(link->link_enc, &enc_init);
+ }
+
+ return true;
+
+failed_alloc:
+ return false;
+}
+
+static bool stream_adjust_vmin_vmax(struct dc *dc,
+ struct dc_stream_state **streams, int num_streams,
+ int vmin, int vmax)
+{
+ /* TODO: Support multiple streams */
+ struct dc_stream_state *stream = streams[0];
+ int i = 0;
+ bool ret = false;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ dc->hwss.set_drr(&pipe, 1, vmin, vmax);
+
+ /* build and update the info frame */
+ resource_build_info_frame(pipe);
+ dc->hwss.update_info_frame(pipe);
+
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+static bool stream_get_crtc_position(struct dc *dc,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int *v_pos, unsigned int *nom_v_pos)
+{
+ /* TODO: Support multiple streams */
+ struct dc_stream_state *stream = streams[0];
+ int i = 0;
+ bool ret = false;
+ struct crtc_position position;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ dc->hwss.get_position(&pipe, 1, &position);
+
+ *v_pos = position.vertical_count;
+ *nom_v_pos = position.nominal_vcount;
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+static bool set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
+{
+ int i = 0;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_gamut_remap(pipes);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+static bool program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
+{
+ int i = 0;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_csc_matrix(pipes,
+ stream->output_color_space,
+ stream->csc_color_matrix.matrix);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+static void set_static_screen_events(struct dc *dc,
+ struct dc_stream_state **streams,
+ int num_streams,
+ const struct dc_static_screen_events *events)
+{
+ int i = 0;
+ int j = 0;
+ struct pipe_ctx *pipes_affected[MAX_PIPES];
+ int num_pipes_affected = 0;
+
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream = streams[i];
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].stream
+ == stream) {
+ pipes_affected[num_pipes_affected++] =
+ &dc->current_state->res_ctx.pipe_ctx[j];
+ }
+ }
+ }
+
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
+}
+
+static void set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+{
+
+ int i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i] == link)
+ break;
+ }
+
+	if (i >= dc->link_count) {
+		ASSERT_CRITICAL(false);
+		return;
+	}
+
+ dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
+}
+
+static void perform_link_training(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ int i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc_link_dp_perform_link_training(
+ dc->links[i],
+ link_setting,
+ skip_video_pattern);
+}
+
+static void set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ link->preferred_link_setting = *link_setting;
+ dp_retrain_link_dp_test(link, link_setting, false);
+}
+
+static void enable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_enable_hpd(link);
+}
+
+static void disable_hpd(const struct dc_link *link)
+{
+ dc_link_dp_disable_hpd(link);
+}
+
+
+static void set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ if (link != NULL)
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ p_link_settings,
+ p_custom_pattern,
+ cust_pattern_size);
+}
+
+static void set_dither_option(struct dc_stream_state *stream,
+ enum dc_dither_option option)
+{
+ struct bit_depth_reduction_params params;
+ struct dc_link *link = stream->status.link;
+ struct pipe_ctx *pipes = NULL;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
+ stream) {
+ pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ memset(&params, 0, sizeof(params));
+ if (!pipes)
+ return;
+ if (option > DITHER_OPTION_MAX)
+ return;
+
+ stream->dither_option = option;
+
+ resource_build_bit_depth_reduction_params(stream,
+ &params);
+ stream->bit_depth_params = params;
+ pipes->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
+}
+
+void set_dpms(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ bool dpms_off)
+{
+ struct pipe_ctx *pipe_ctx = NULL;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ if (!pipe_ctx) {
+ ASSERT(0);
+ return;
+ }
+
+ if (stream->dpms_off != dpms_off) {
+ stream->dpms_off = dpms_off;
+ if (dpms_off)
+ core_link_disable_stream(pipe_ctx,
+ KEEP_ACQUIRED_RESOURCE);
+ else
+ core_link_enable_stream(dc->current_state, pipe_ctx);
+ }
+}
+
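+/*
+ * Wire up the optional stream/link helper callbacks.  adjust_vmin_vmax is
+ * only exposed when the hw sequencer implements set_drr.
+ */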
+static void allocate_dc_stream_funcs(struct dc *dc)
+{
+ if (dc->hwss.set_drr != NULL) {
+ dc->stream_funcs.adjust_vmin_vmax =
+ stream_adjust_vmin_vmax;
+ }
+
+ dc->stream_funcs.set_static_screen_events =
+ set_static_screen_events;
+
+ dc->stream_funcs.get_crtc_position =
+ stream_get_crtc_position;
+
+ dc->stream_funcs.set_gamut_remap =
+ set_gamut_remap;
+
+ dc->stream_funcs.program_csc_matrix =
+ program_csc_matrix;
+
+ dc->stream_funcs.set_dither_option =
+ set_dither_option;
+
+ dc->stream_funcs.set_dpms =
+ set_dpms;
+
+ dc->link_funcs.set_drive_settings =
+ set_drive_settings;
+
+ dc->link_funcs.perform_link_training =
+ perform_link_training;
+
+ dc->link_funcs.set_preferred_link_settings =
+ set_preferred_link_settings;
+
+ dc->link_funcs.enable_hpd =
+ enable_hpd;
+
+ dc->link_funcs.disable_hpd =
+ disable_hpd;
+
+ dc->link_funcs.set_test_pattern =
+ set_test_pattern;
+}
+
+static void destruct(struct dc *dc)
+{
+	/* destruct() is also reached from construct()'s fail path, where the
+	 * current state and context may not exist yet.
+	 */
+	if (dc->current_state) {
+		dc_release_state(dc->current_state);
+		dc->current_state = NULL;
+	}
+
+	destroy_links(dc);
+
+	dc_destroy_resource_pool(dc);
+
+	if (dc->ctx) {
+		if (dc->ctx->gpio_service)
+			dal_gpio_service_destroy(&dc->ctx->gpio_service);
+
+		if (dc->ctx->i2caux)
+			dal_i2caux_destroy(&dc->ctx->i2caux);
+
+		if (dc->ctx->created_bios)
+			dal_bios_parser_destroy(&dc->ctx->dc_bios);
+
+		if (dc->ctx->logger)
+			dal_logger_destroy(&dc->ctx->logger);
+
+		kfree(dc->ctx);
+		dc->ctx = NULL;
+	}
+
+ kfree(dc->bw_vbios);
+ dc->bw_vbios = NULL;
+
+ kfree(dc->bw_dceip);
+ dc->bw_dceip = NULL;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ kfree(dc->dcn_soc);
+ dc->dcn_soc = NULL;
+
+ kfree(dc->dcn_ip);
+ dc->dcn_ip = NULL;
+
+#endif
+}
+
+static bool construct(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dal_logger *logger;
+ struct dc_context *dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+ struct bw_calcs_dceip *dc_dceip = kzalloc(sizeof(*dc_dceip),
+ GFP_KERNEL);
+ struct bw_calcs_vbios *dc_vbios = kzalloc(sizeof(*dc_vbios),
+ GFP_KERNEL);
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dcn_soc_bounding_box *dcn_soc = kzalloc(sizeof(*dcn_soc),
+ GFP_KERNEL);
+ struct dcn_ip_params *dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
+#endif
+
+ enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+
+ if (!dc_dceip) {
+ dm_error("%s: failed to create dceip\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_dceip = dc_dceip;
+
+ if (!dc_vbios) {
+ dm_error("%s: failed to create vbios\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_vbios = dc_vbios;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (!dcn_soc) {
+ dm_error("%s: failed to create dcn_soc\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_soc = dcn_soc;
+
+ if (!dcn_ip) {
+ dm_error("%s: failed to create dcn_ip\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_ip = dcn_ip;
+#endif
+
+ if (!dc_ctx) {
+ dm_error("%s: failed to create ctx\n", __func__);
+ goto fail;
+ }
+
+ dc->current_state = dc_create_state();
+
+ if (!dc->current_state) {
+ dm_error("%s: failed to create validate ctx\n", __func__);
+ goto fail;
+ }
+
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+
+ /* Create logger */
+ logger = dal_logger_create(dc_ctx, init_params->log_mask);
+
+ if (!logger) {
+ /* can *not* call logger. call base driver 'print error' */
+ dm_error("%s: failed to create Logger!\n", __func__);
+ goto fail;
+ }
+ dc_ctx->logger = logger;
+ dc->ctx = dc_ctx;
+ dc->ctx->dce_environment = init_params->dce_environment;
+
+ dc_version = resource_parse_asic_id(init_params->asic_id);
+ dc->ctx->dce_version = dc_version;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ dc->ctx->fbc_gpu_addr = init_params->fbc_gpu_addr;
+#endif
+ /* Resource should construct all asic specific resources.
+ * This should be the only place where we need to parse the asic id
+ */
+ if (init_params->vbios_override)
+ dc_ctx->dc_bios = init_params->vbios_override;
+ else {
+ /* Create BIOS parser */
+ struct bp_init_data bp_init_data;
+
+ bp_init_data.ctx = dc_ctx;
+ bp_init_data.bios = init_params->asic_id.atombios_base_address;
+
+ dc_ctx->dc_bios = dal_bios_parser_create(
+ &bp_init_data, dc_version);
+
+ if (!dc_ctx->dc_bios) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc_ctx->created_bios = true;
+ }
+
+ /* Create I2C AUX */
+ dc_ctx->i2caux = dal_i2caux_create(dc_ctx);
+
+ if (!dc_ctx->i2caux) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ /* Create GPIO service */
+ dc_ctx->gpio_service = dal_gpio_service_create(
+ dc_version,
+ dc_ctx->dce_environment,
+ dc_ctx);
+
+ if (!dc_ctx->gpio_service) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc->res_pool = dc_create_resource_pool(
+ dc,
+ init_params->num_virtual_links,
+ dc_version,
+ init_params->asic_id);
+ if (!dc->res_pool)
+ goto fail;
+
+ dc_resource_state_construct(dc, dc->current_state);
+
+ if (!create_links(dc, init_params->num_virtual_links))
+ goto fail;
+
+ allocate_dc_stream_funcs(dc);
+
+ return true;
+
+fail:
+
+ destruct(dc);
+ return false;
+}
+
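+/*
+ * A plane is "dangling" when its stream is active in the current state but
+ * no longer present in the new context.  Build a copy of the current state,
+ * strip all planes from such streams, program that copy to the hardware and
+ * make it the current state so the planes are actually disabled before the
+ * new context is committed.
+ */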
+static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+{
+ int i, j;
+ struct dc_state *dangling_context = dc_create_state();
+ struct dc_state *current_ctx;
+
+ if (dangling_context == NULL)
+ return;
+
+ dc_resource_state_copy_construct(dc->current_state, dangling_context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *old_stream =
+ dc->current_state->res_ctx.pipe_ctx[i].stream;
+ bool should_disable = true;
+
+ for (j = 0; j < context->stream_count; j++) {
+ if (old_stream == context->streams[j]) {
+ should_disable = false;
+ break;
+ }
+ }
+ if (should_disable && old_stream) {
+ dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ }
+ }
+
+ current_ctx = dc->current_state;
+ dc->current_state = dangling_context;
+ dc_release_state(current_ctx);
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+struct dc *dc_create(const struct dc_init_data *init_params)
+{
+ struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ unsigned int full_pipe_count;
+
+ if (NULL == dc)
+ goto alloc_fail;
+
+ if (false == construct(dc, init_params))
+ goto construct_fail;
+
+ /*TODO: separate HW and SW initialization*/
+ dc->hwss.init_hw(dc);
+
+ full_pipe_count = dc->res_pool->pipe_count;
+ if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+ full_pipe_count--;
+ dc->caps.max_streams = min(
+ full_pipe_count,
+ dc->res_pool->stream_enc_count);
+
+ dc->caps.max_links = dc->link_count;
+ dc->caps.max_audios = dc->res_pool->audio_count;
+
+ dc->config = init_params->flags;
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Display Core initialized\n");
+
+
+ /* TODO: missing feature to be enabled */
+ dc->debug.disable_dfs_bypass = true;
+
+ return dc;
+
+construct_fail:
+ kfree(dc);
+
+alloc_fail:
+ return NULL;
+}
+
+void dc_destroy(struct dc **dc)
+{
+ destruct(*dc);
+ kfree(*dc);
+ *dc = NULL;
+}
+
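+/*
+ * Group all active top-level pipes whose stream timings are synchronizable,
+ * promote an unblanked pipe (if any) to the head of each group as the sync
+ * master, drop the remaining already-unblanked pipes, and enable timing
+ * synchronization for every group with more than one member.
+ */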
+static void program_timing_sync(
+ struct dc *dc,
+ struct dc_state *ctx)
+{
+ int i, j;
+ int group_index = 0;
+ int pipe_count = dc->res_pool->pipe_count;
+ struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+ if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
+ continue;
+
+ unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
+ }
+
+ for (i = 0; i < pipe_count; i++) {
+ int group_size = 1;
+ struct pipe_ctx *pipe_set[MAX_PIPES];
+
+ if (!unsynced_pipes[i])
+ continue;
+
+ pipe_set[0] = unsynced_pipes[i];
+ unsynced_pipes[i] = NULL;
+
+		/* Add this tg to the set, then search the remaining tgs for
+		 * ones with the same timing and add them to the group.
+		 */
+ for (j = i + 1; j < pipe_count; j++) {
+ if (!unsynced_pipes[j])
+ continue;
+
+ if (resource_are_streams_timing_synchronizable(
+ unsynced_pipes[j]->stream,
+ pipe_set[0]->stream)) {
+ pipe_set[group_size] = unsynced_pipes[j];
+ unsynced_pipes[j] = NULL;
+ group_size++;
+ }
+ }
+
+ /* set first unblanked pipe as master */
+ for (j = 0; j < group_size; j++) {
+ struct pipe_ctx *temp;
+
+ if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ if (j == 0)
+ break;
+
+ temp = pipe_set[0];
+ pipe_set[0] = pipe_set[j];
+ pipe_set[j] = temp;
+ break;
+ }
+ }
+
+ /* remove any other unblanked pipes as they have already been synced */
+ for (j = j + 1; j < group_size; j++) {
+ if (!pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ }
+ }
+
+ if (group_size > 1) {
+ dc->hwss.enable_timing_synchronization(
+ dc, group_index, group_size, pipe_set);
+ group_index++;
+ }
+ }
+}
+
+static bool context_changed(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ uint8_t i;
+
+ if (context->stream_count != dc->current_state->stream_count)
+ return true;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->streams[i] != context->streams[i])
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_enable_stereo(
+ struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count)
+{
+ bool ret = true;
+ int i, j;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context != NULL)
+ pipe = &context->res_ctx.pipe_ctx[i];
+ else
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ for (j = 0 ; pipe && j < stream_count; j++) {
+ if (streams[j] && streams[j] == pipe->stream &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+
+ return ret;
+}
+
+
+/*
+ * Applies the given context to HW and copies it into the current context.
+ * It's up to the user to release the src context afterwards.
+ */
+static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ struct pipe_ctx *pipe;
+ int i, j, k, l;
+ struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
+
+ disable_dangling_plane(dc, context);
+
+ for (i = 0; i < context->stream_count; i++)
+ dc_streams[i] = context->streams[i];
+
+ if (!dcb->funcs->is_accelerated_mode(dcb))
+ dc->hwss.enable_accelerated_mode(dc);
+
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_sink *sink = context->streams[i]->sink;
+
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
+
+ /*
+ * enable stereo
+ * TODO rework dc_enable_stereo call to work with validation sets?
+ */
+ for (k = 0; k < MAX_PIPES; k++) {
+ pipe = &context->res_ctx.pipe_ctx[k];
+
+ for (l = 0 ; pipe && l < context->stream_count; l++) {
+ if (context->streams[l] &&
+ context->streams[l] == pipe->stream &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+
+ CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
+ context->streams[i]->timing.h_addressable,
+ context->streams[i]->timing.v_addressable,
+ context->streams[i]->timing.h_total,
+ context->streams[i]->timing.v_total,
+ context->streams[i]->timing.pix_clk_khz);
+ }
+
+ dc->hwss.ready_shared_resources(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+ }
+ result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+ program_timing_sync(dc, context);
+
+ dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+
+ for (i = 0; i < context->stream_count; i++) {
+ for (j = 0; j < MAX_PIPES; j++) {
+ pipe = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe->top_pipe && pipe->stream == context->streams[i])
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+ }
+
+ dc_release_state(dc->current_state);
+
+ dc->current_state = context;
+
+ dc_retain_state(dc->current_state);
+
+ dc->hwss.optimize_shared_resources(dc);
+
+ return result;
+}
+
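+/*
+ * Commit a context to the hardware.  A context whose stream set matches the
+ * current state is treated as a no-op and reported as success.
+ */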
+bool dc_commit_state(struct dc *dc, struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ int i;
+
+	if (false == context_changed(dc, context))
+		return true;
+
+ dm_logger_write(dc->ctx->logger, LOG_DC, "%s: %d streams\n",
+ __func__, context->stream_count);
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ dc_stream_log(stream,
+ dc->ctx->logger,
+ LOG_DC);
+ }
+
+ result = dc_commit_state_no_check(dc, context);
+
+ return (result == DC_OK);
+}
+
+
+bool dc_post_update_surfaces_to_stream(struct dc *dc)
+{
+ int i;
+ struct dc_state *context = dc->current_state;
+
+ post_surface_trace(dc);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL
+ || context->res_ctx.pipe_ctx[i].plane_state == NULL)
+ dc->hwss.power_down_front_end(dc, i);
+
+ /* 3rd param should be true, temp w/a for RV*/
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ dc->hwss.set_bandwidth(dc, context, dc->ctx->dce_version < DCN_VERSION_1_0);
+#else
+ dc->hwss.set_bandwidth(dc, context, true);
+#endif
+ return true;
+}
+
+/*
+ * TODO this whole function needs to go
+ *
+ * dc_surface_update is needlessly complex. See if we can just replace this
+ * with a dc_plane_state and follow the atomic model a bit more closely here.
+ */
+bool dc_commit_planes_to_stream(
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *state)
+{
+ /* no need to dynamically allocate this. it's pretty small */
+ struct dc_surface_update updates[MAX_SURFACES];
+ struct dc_flip_addrs *flip_addr;
+ struct dc_plane_info *plane_info;
+ struct dc_scaling_info *scaling_info;
+ int i;
+ struct dc_stream_update *stream_update =
+ kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+
+ if (!stream_update) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs),
+ GFP_KERNEL);
+ plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info),
+ GFP_KERNEL);
+ scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info),
+ GFP_KERNEL);
+
+ if (!flip_addr || !plane_info || !scaling_info) {
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return false;
+ }
+
+ memset(updates, 0, sizeof(updates));
+
+ stream_update->src = dc_stream->src;
+ stream_update->dst = dc_stream->dst;
+ stream_update->out_transfer_func = dc_stream->out_transfer_func;
+
+ for (i = 0; i < new_plane_count; i++) {
+ updates[i].surface = plane_states[i];
+ updates[i].gamma =
+ (struct dc_gamma *)plane_states[i]->gamma_correction;
+ updates[i].in_transfer_func = plane_states[i]->in_transfer_func;
+ flip_addr[i].address = plane_states[i]->address;
+ flip_addr[i].flip_immediate = plane_states[i]->flip_immediate;
+ plane_info[i].color_space = plane_states[i]->color_space;
+ plane_info[i].format = plane_states[i]->format;
+ plane_info[i].plane_size = plane_states[i]->plane_size;
+ plane_info[i].rotation = plane_states[i]->rotation;
+ plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror;
+ plane_info[i].stereo_format = plane_states[i]->stereo_format;
+ plane_info[i].tiling_info = plane_states[i]->tiling_info;
+ plane_info[i].visible = plane_states[i]->visible;
+ plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha;
+ plane_info[i].dcc = plane_states[i]->dcc;
+ scaling_info[i].scaling_quality = plane_states[i]->scaling_quality;
+ scaling_info[i].src_rect = plane_states[i]->src_rect;
+ scaling_info[i].dst_rect = plane_states[i]->dst_rect;
+ scaling_info[i].clip_rect = plane_states[i]->clip_rect;
+
+ updates[i].flip_addr = &flip_addr[i];
+ updates[i].plane_info = &plane_info[i];
+ updates[i].scaling_info = &scaling_info[i];
+ }
+
+ dc_commit_updates_for_stream(
+ dc,
+ updates,
+ new_plane_count,
+ dc_stream, stream_update, plane_states, state);
+
+ kfree(flip_addr);
+ kfree(plane_info);
+ kfree(scaling_info);
+ kfree(stream_update);
+ return true;
+}
+
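+/*
+ * dc_state objects are reference counted: dc_create_state() returns a state
+ * with a refcount of one, dc_retain_state() takes an additional reference
+ * and dc_release_state() drops one, freeing the state once the count
+ * reaches zero.
+ */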
+struct dc_state *dc_create_state(void)
+{
+ struct dc_state *context = kzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!context)
+ return NULL;
+
+ kref_init(&context->refcount);
+ return context;
+}
+
+void dc_retain_state(struct dc_state *context)
+{
+ kref_get(&context->refcount);
+}
+
+static void dc_state_free(struct kref *kref)
+{
+ struct dc_state *context = container_of(kref, struct dc_state, refcount);
+ dc_resource_state_destruct(context);
+ kfree(context);
+}
+
+void dc_release_state(struct dc_state *context)
+{
+ kref_put(&context->refcount, dc_state_free);
+}
+
+static bool is_surface_in_context(
+ const struct dc_state *context,
+ const struct dc_plane_state *plane_state)
+{
+ int j;
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (plane_state == pipe_ctx->plane_state) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
+{
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ return 12;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ return 16;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ return 32;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ return 64;
+ default:
+ ASSERT_CRITICAL(false);
+ return -1;
+ }
+}
+
+static enum surface_update_type get_plane_info_update_type(
+ const struct dc_surface_update *u,
+ int surface_index)
+{
+ struct dc_plane_info temp_plane_info;
+ memset(&temp_plane_info, 0, sizeof(temp_plane_info));
+
+ if (!u->plane_info)
+ return UPDATE_TYPE_FAST;
+
+ temp_plane_info = *u->plane_info;
+
+	/* Copy the parameters that force a full update from the current
+	 * surface and the rest from the provided plane configuration,
+	 * then memcmp the two and run special validation for the
+	 * parameters that can cause fast/medium updates.
+	 */
+
+ /* Full update parameters */
+ temp_plane_info.color_space = u->surface->color_space;
+ temp_plane_info.dcc = u->surface->dcc;
+ temp_plane_info.horizontal_mirror = u->surface->horizontal_mirror;
+ temp_plane_info.plane_size = u->surface->plane_size;
+ temp_plane_info.rotation = u->surface->rotation;
+ temp_plane_info.stereo_format = u->surface->stereo_format;
+
+ if (surface_index == 0)
+ temp_plane_info.visible = u->plane_info->visible;
+ else
+ temp_plane_info.visible = u->surface->visible;
+
+ if (memcmp(u->plane_info, &temp_plane_info,
+ sizeof(struct dc_plane_info)) != 0)
+ return UPDATE_TYPE_FULL;
+
+ if (pixel_format_to_bpp(u->plane_info->format) !=
+ pixel_format_to_bpp(u->surface->format)) {
+ /* different bytes per element will require full bandwidth
+ * and DML calculation
+ */
+ return UPDATE_TYPE_FULL;
+ }
+
+ if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
+ sizeof(union dc_tiling_info)) != 0) {
+		/* todo: the checks below are HW dependent; we should add a
+		 * hook to the DCE/N resource and validate them there.
+		 */
+ if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ /* swizzled mode requires RQ to be setup properly,
+ * thus need to run DML to calculate RQ settings
+ */
+ return UPDATE_TYPE_FULL;
+ }
+ }
+
+ return UPDATE_TYPE_MED;
+}
+
+static enum surface_update_type get_scaling_info_update_type(
+ const struct dc_surface_update *u)
+{
+ if (!u->scaling_info)
+ return UPDATE_TYPE_FAST;
+
+ if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+ || u->scaling_info->src_rect.height != u->surface->src_rect.height
+ || u->scaling_info->clip_rect.width != u->surface->clip_rect.width
+ || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
+ || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+ || u->scaling_info->dst_rect.height != u->surface->dst_rect.height)
+ return UPDATE_TYPE_FULL;
+
+ if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ || u->scaling_info->src_rect.y != u->surface->src_rect.y
+ || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+ || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
+ || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
+ || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ return UPDATE_TYPE_MED;
+
+ return UPDATE_TYPE_FAST;
+}
+
+static enum surface_update_type det_surface_update(
+ const struct dc *dc,
+ const struct dc_surface_update *u,
+ int surface_index)
+{
+ const struct dc_state *context = dc->current_state;
+ enum surface_update_type type = UPDATE_TYPE_FAST;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+ if (!is_surface_in_context(context, u->surface))
+ return UPDATE_TYPE_FULL;
+
+ type = get_plane_info_update_type(u, surface_index);
+ if (overall_type < type)
+ overall_type = type;
+
+ type = get_scaling_info_update_type(u);
+ if (overall_type < type)
+ overall_type = type;
+
+ if (u->in_transfer_func ||
+ u->hdr_static_metadata) {
+ if (overall_type < UPDATE_TYPE_MED)
+ overall_type = UPDATE_TYPE_MED;
+ }
+
+ return overall_type;
+}
+
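+/*
+ * Classify a set of surface updates: a stream update, a plane count change
+ * or a surface that is not part of the current context forces a FULL
+ * update; scaling size, pixel format and tiling changes are FULL as well,
+ * while position-only moves, transfer function and HDR metadata changes
+ * are MED; pure flips stay FAST.
+ */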
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status)
+{
+ int i;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return UPDATE_TYPE_FULL;
+
+ if (stream_update)
+ return UPDATE_TYPE_FULL;
+
+ for (i = 0 ; i < surface_count; i++) {
+ enum surface_update_type type =
+ det_surface_update(dc, &updates[i], i);
+
+ if (type == UPDATE_TYPE_FULL)
+ return type;
+
+ if (overall_type < type)
+ overall_type = type;
+ }
+
+ return overall_type;
+}
+
+static struct dc_stream_status *stream_get_status(
+ struct dc_state *ctx,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ for (i = 0; i < ctx->stream_count; i++) {
+ if (stream == ctx->streams[i]) {
+ return &ctx->stream_status[i];
+ }
+ }
+
+ return NULL;
+}
+
+static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
+
+
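+/*
+ * Program a set of surface updates for one stream: reprogram bandwidth on a
+ * full update, wait for MPCC disconnect on anything above a fast update,
+ * lock the affected pipes (all of them for a full update), re-apply the
+ * per-stream context where needed, then perform the individual flip,
+ * transfer function and info frame updates before unlocking the pipes.
+ */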
+static void commit_planes_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int i, j;
+
+ if (update_type == UPDATE_TYPE_FULL) {
+ dc->hwss.set_bandwidth(dc, context, false);
+ context_clock_trace(dc, context);
+ }
+
+ if (update_type > UPDATE_TYPE_FAST) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
+ }
+ }
+
+ if (surface_count == 0) {
+ /*
+		 * When turning the screen off there is no need to program the
+		 * front end a second time, so return right after programming it.
+ */
+ dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
+ return;
+ }
+
+ /* Lock pipes for provided surfaces, or all active if full update*/
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (update_type != UPDATE_TYPE_FULL && pipe_ctx->plane_state != plane_state)
+ continue;
+ if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+ continue;
+
+ dc->hwss.pipe_control_lock(
+ dc,
+ pipe_ctx,
+ true);
+ }
+ if (update_type == UPDATE_TYPE_FULL)
+ break;
+ }
+
+ /* Full fe update*/
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (update_type != UPDATE_TYPE_FULL || !pipe_ctx->plane_state)
+ continue;
+
+ if (!pipe_ctx->top_pipe && pipe_ctx->stream) {
+ struct dc_stream_status *stream_status = stream_get_status(context, pipe_ctx->stream);
+
+ dc->hwss.apply_ctx_for_surface(
+ dc, pipe_ctx->stream, stream_status->plane_count, context);
+ }
+ }
+
+ if (update_type > UPDATE_TYPE_FAST)
+ context_timing_trace(dc, &context->res_ctx);
+
+ /* Perform requested Updates */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ if (update_type == UPDATE_TYPE_MED)
+ dc->hwss.apply_ctx_for_surface(
+ dc, stream, surface_count, context);
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ if (srf_updates[i].flip_addr)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+			/* workaround to program degamma regs for the split pipe after set mode */
+ if (srf_updates[i].in_transfer_func || (pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state))
+ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+
+ if (stream_update != NULL &&
+ stream_update->out_transfer_func != NULL) {
+ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+
+ if (srf_updates[i].hdr_static_metadata) {
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
+ }
+ }
+ }
+
+ /* Unlock pipes */
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ for (j = 0; j < surface_count; j++) {
+ if (update_type != UPDATE_TYPE_FULL &&
+ srf_updates[j].surface != pipe_ctx->plane_state)
+ continue;
+ if (!pipe_ctx->plane_state || pipe_ctx->top_pipe)
+ continue;
+
+ dc->hwss.pipe_control_lock(
+ dc,
+ pipe_ctx,
+ false);
+
+ break;
+ }
+ }
+}
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_plane_state **plane_states,
+ struct dc_state *state)
+{
+ const struct dc_stream_status *stream_status;
+ enum surface_update_type update_type;
+ struct dc_state *context;
+ struct dc_context *dc_ctx = dc->ctx;
+ int i, j;
+
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
+
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+
+ if (update_type >= update_surface_trace_level)
+ update_surface_trace(dc, srf_updates, surface_count);
+
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+
+ /* initialize scratch memory for building context */
+ context = dc_create_state();
+ if (context == NULL) {
+ DC_ERROR("Failed to allocate new validate context!\n");
+ return;
+ }
+
+ dc_resource_state_copy_construct(state, context);
+ }
+
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *surface = srf_updates[i].surface;
+
+ /* TODO: On flip we don't build the state, so it still has the
+ * old address. Which is why we are updating the address here
+ */
+ if (srf_updates[i].flip_addr) {
+ surface->address = srf_updates[i].flip_addr->address;
+ surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;
+
+ }
+
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
+ }
+
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+
+ if (update_type >= UPDATE_TYPE_FULL)
+ dc_post_update_surfaces_to_stream(dc);
+
+	if (dc->current_state != context) {
+		struct dc_state *old = dc->current_state;
+
+		dc->current_state = context;
+		dc_release_state(old);
+	}
+}
+
+uint8_t dc_get_current_stream_count(struct dc *dc)
+{
+ return dc->current_state->stream_count;
+}
+
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
+{
+ if (i < dc->current_state->stream_count)
+ return dc->current_state->streams[i];
+ return NULL;
+}
+
+enum dc_irq_source dc_interrupt_to_irq_source(
+ struct dc *dc,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
+}
+
+void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+{
+
+ if (dc == NULL)
+ return;
+
+ dal_irq_service_set(dc->res_pool->irqs, src, enable);
+}
+
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
+{
+ dal_irq_service_ack(dc->res_pool->irqs, src);
+}
+
+void dc_set_power_state(
+ struct dc *dc,
+ enum dc_acpi_cm_power_state power_state)
+{
+ struct kref refcount;
+
+ switch (power_state) {
+ case DC_ACPI_CM_POWER_STATE_D0:
+ dc_resource_state_construct(dc, dc->current_state);
+
+ dc->hwss.init_hw(dc);
+ break;
+ default:
+
+ dc->hwss.power_down(dc);
+
+ /* Zero out the current context so that on resume we start with
+ * clean state, and dc hw programming optimizations will not
+ * cause any trouble.
+ */
+
+ /* Preserve refcount */
+ refcount = dc->current_state->refcount;
+ dc_resource_state_destruct(dc->current_state);
+ memset(dc->current_state, 0,
+ sizeof(*dc->current_state));
+
+ dc->current_state->refcount = refcount;
+
+ break;
+ }
+
+}
+
+void dc_resume(struct dc *dc)
+{
+
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++)
+ core_link_resume(dc->links[i]);
+}
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd)
+{
+
+ struct dc_link *link = dc->links[link_index];
+ struct ddc_service *ddc = link->ddc;
+
+ return dal_i2caux_submit_i2c_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ cmd);
+}
+
+static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
+{
+ if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ dc_sink_retain(sink);
+
+ dc_link->remote_sinks[dc_link->sink_count] = sink;
+ dc_link->sink_count++;
+
+ return true;
+}
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data)
+{
+ struct dc_sink *dc_sink;
+ enum dc_edid_status edid_status;
+
+ if (len > MAX_EDID_BUFFER_SIZE) {
+ dm_error("Max EDID buffer size breached!\n");
+ return NULL;
+ }
+
+ if (!init_data) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (!init_data->link) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dc_sink = dc_sink_create(init_data);
+
+ if (!dc_sink)
+ return NULL;
+
+ memmove(dc_sink->dc_edid.raw_edid, edid, len);
+ dc_sink->dc_edid.length = len;
+
+ if (!link_add_remote_sink_helper(
+ link,
+ dc_sink))
+ goto fail_add_sink;
+
+ edid_status = dm_helpers_parse_edid_caps(
+ link->ctx,
+ &dc_sink->dc_edid,
+ &dc_sink->edid_caps);
+
+ if (edid_status != EDID_OK)
+ goto fail;
+
+ return dc_sink;
+fail:
+ dc_link_remove_remote_sink(link, dc_sink);
+fail_add_sink:
+ dc_sink_release(dc_sink);
+ return NULL;
+}
+
+void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
+{
+ int i;
+
+ if (!link->sink_count) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ for (i = 0; i < link->sink_count; i++) {
+ if (link->remote_sinks[i] == sink) {
+ dc_sink_release(sink);
+ link->remote_sinks[i] = NULL;
+
+ /* shrink array to remove empty place */
+ while (i < link->sink_count - 1) {
+ link->remote_sinks[i] = link->remote_sinks[i+1];
+ i++;
+ }
+ link->remote_sinks[i] = NULL;
+ link->sink_count--;
+ return;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
new file mode 100644
index 000000000000..6acee5426e4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -0,0 +1,359 @@
+/*
+ * dc_debug.c
+ *
+ * Created on: Nov 3, 2016
+ * Author: yonsun
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#define SURFACE_TRACE(...) do {\
+ if (dc->debug.surface_trace) \
+ dm_logger_write(logger, \
+ LOG_IF_TRACE, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define TIMING_TRACE(...) do {\
+ if (dc->debug.timing_trace) \
+ dm_logger_write(logger, \
+ LOG_SYNC, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define CLOCK_TRACE(...) do {\
+ if (dc->debug.clock_trace) \
+ dm_logger_write(logger, \
+ LOG_BANDWIDTH_CALCS, \
+ ##__VA_ARGS__); \
+} while (0)
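+
+/*
+ * The trace macros above expect "dc" and "logger" locals in the calling
+ * function and only emit output when the corresponding dc->debug flag
+ * (surface_trace, timing_trace, clock_trace) is set.
+ */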
+
+void pre_surface_trace(
+ struct dc *dc,
+ const struct dc_plane_state *const *plane_states,
+ int surface_count)
+{
+ int i;
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ for (i = 0; i < surface_count; i++) {
+ const struct dc_plane_state *plane_state = plane_states[i];
+
+ SURFACE_TRACE("Planes %d:\n", i);
+
+ SURFACE_TRACE(
+ "plane_state->visible = %d;\n"
+ "plane_state->flip_immediate = %d;\n"
+ "plane_state->address.type = %d;\n"
+ "plane_state->address.grph.addr.quad_part = 0x%X;\n"
+ "plane_state->address.grph.meta_addr.quad_part = 0x%X;\n"
+ "plane_state->scaling_quality.h_taps = %d;\n"
+ "plane_state->scaling_quality.v_taps = %d;\n"
+ "plane_state->scaling_quality.h_taps_c = %d;\n"
+ "plane_state->scaling_quality.v_taps_c = %d;\n",
+ plane_state->visible,
+ plane_state->flip_immediate,
+ plane_state->address.type,
+ plane_state->address.grph.addr.quad_part,
+ plane_state->address.grph.meta_addr.quad_part,
+ plane_state->scaling_quality.h_taps,
+ plane_state->scaling_quality.v_taps,
+ plane_state->scaling_quality.h_taps_c,
+ plane_state->scaling_quality.v_taps_c);
+
+ SURFACE_TRACE(
+ "plane_state->src_rect.x = %d;\n"
+ "plane_state->src_rect.y = %d;\n"
+ "plane_state->src_rect.width = %d;\n"
+ "plane_state->src_rect.height = %d;\n"
+ "plane_state->dst_rect.x = %d;\n"
+ "plane_state->dst_rect.y = %d;\n"
+ "plane_state->dst_rect.width = %d;\n"
+ "plane_state->dst_rect.height = %d;\n"
+ "plane_state->clip_rect.x = %d;\n"
+ "plane_state->clip_rect.y = %d;\n"
+ "plane_state->clip_rect.width = %d;\n"
+ "plane_state->clip_rect.height = %d;\n",
+ plane_state->src_rect.x,
+ plane_state->src_rect.y,
+ plane_state->src_rect.width,
+ plane_state->src_rect.height,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.height,
+ plane_state->clip_rect.x,
+ plane_state->clip_rect.y,
+ plane_state->clip_rect.width,
+ plane_state->clip_rect.height);
+
+ SURFACE_TRACE(
+ "plane_state->plane_size.grph.surface_size.x = %d;\n"
+ "plane_state->plane_size.grph.surface_size.y = %d;\n"
+ "plane_state->plane_size.grph.surface_size.width = %d;\n"
+ "plane_state->plane_size.grph.surface_size.height = %d;\n"
+ "plane_state->plane_size.grph.surface_pitch = %d;\n",
+ plane_state->plane_size.grph.surface_size.x,
+ plane_state->plane_size.grph.surface_size.y,
+ plane_state->plane_size.grph.surface_size.width,
+ plane_state->plane_size.grph.surface_size.height,
+ plane_state->plane_size.grph.surface_pitch);
+
+
+ SURFACE_TRACE(
+ "plane_state->tiling_info.gfx8.num_banks = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_width = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_width_c = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_height = %d;\n"
+ "plane_state->tiling_info.gfx8.bank_height_c = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_aspect = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_aspect_c = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_split = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_split_c = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_mode = %d;\n"
+ "plane_state->tiling_info.gfx8.tile_mode_c = %d;\n",
+ plane_state->tiling_info.gfx8.num_banks,
+ plane_state->tiling_info.gfx8.bank_width,
+ plane_state->tiling_info.gfx8.bank_width_c,
+ plane_state->tiling_info.gfx8.bank_height,
+ plane_state->tiling_info.gfx8.bank_height_c,
+ plane_state->tiling_info.gfx8.tile_aspect,
+ plane_state->tiling_info.gfx8.tile_aspect_c,
+ plane_state->tiling_info.gfx8.tile_split,
+ plane_state->tiling_info.gfx8.tile_split_c,
+ plane_state->tiling_info.gfx8.tile_mode,
+ plane_state->tiling_info.gfx8.tile_mode_c);
+
+ SURFACE_TRACE(
+ "plane_state->tiling_info.gfx8.pipe_config = %d;\n"
+ "plane_state->tiling_info.gfx8.array_mode = %d;\n"
+ "plane_state->color_space = %d;\n"
+ "plane_state->dcc.enable = %d;\n"
+ "plane_state->format = %d;\n"
+ "plane_state->rotation = %d;\n"
+ "plane_state->stereo_format = %d;\n",
+ plane_state->tiling_info.gfx8.pipe_config,
+ plane_state->tiling_info.gfx8.array_mode,
+ plane_state->color_space,
+ plane_state->dcc.enable,
+ plane_state->format,
+ plane_state->rotation,
+ plane_state->stereo_format);
+
+ SURFACE_TRACE("plane_state->tiling_info.gfx9.swizzle = %d;\n",
+ plane_state->tiling_info.gfx9.swizzle);
+
+ SURFACE_TRACE("\n");
+ }
+ SURFACE_TRACE("\n");
+}
+
+void update_surface_trace(
+ struct dc *dc,
+ const struct dc_surface_update *updates,
+ int surface_count)
+{
+ int i;
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ for (i = 0; i < surface_count; i++) {
+ const struct dc_surface_update *update = &updates[i];
+
+ SURFACE_TRACE("Update %d\n", i);
+ if (update->flip_addr) {
+ SURFACE_TRACE("flip_addr->address.type = %d;\n"
+ "flip_addr->address.grph.addr.quad_part = 0x%X;\n"
+ "flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n"
+ "flip_addr->flip_immediate = %d;\n",
+ update->flip_addr->address.type,
+ update->flip_addr->address.grph.addr.quad_part,
+ update->flip_addr->address.grph.meta_addr.quad_part,
+ update->flip_addr->flip_immediate);
+ }
+
+ if (update->plane_info) {
+ SURFACE_TRACE(
+ "plane_info->color_space = %d;\n"
+ "plane_info->format = %d;\n"
+ "plane_info->plane_size.grph.surface_pitch = %d;\n"
+ "plane_info->plane_size.grph.surface_size.height = %d;\n"
+ "plane_info->plane_size.grph.surface_size.width = %d;\n"
+ "plane_info->plane_size.grph.surface_size.x = %d;\n"
+ "plane_info->plane_size.grph.surface_size.y = %d;\n"
+ "plane_info->rotation = %d;\n",
+ update->plane_info->color_space,
+ update->plane_info->format,
+ update->plane_info->plane_size.grph.surface_pitch,
+ update->plane_info->plane_size.grph.surface_size.height,
+ update->plane_info->plane_size.grph.surface_size.width,
+ update->plane_info->plane_size.grph.surface_size.x,
+ update->plane_info->plane_size.grph.surface_size.y,
+ update->plane_info->rotation,
+ update->plane_info->stereo_format);
+
+ SURFACE_TRACE(
+ "plane_info->tiling_info.gfx8.num_banks = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_width = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_width_c = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_height = %d;\n"
+ "plane_info->tiling_info.gfx8.bank_height_c = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_aspect = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_aspect_c = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_split = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_split_c = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_mode = %d;\n"
+ "plane_info->tiling_info.gfx8.tile_mode_c = %d;\n",
+ update->plane_info->tiling_info.gfx8.num_banks,
+ update->plane_info->tiling_info.gfx8.bank_width,
+ update->plane_info->tiling_info.gfx8.bank_width_c,
+ update->plane_info->tiling_info.gfx8.bank_height,
+ update->plane_info->tiling_info.gfx8.bank_height_c,
+ update->plane_info->tiling_info.gfx8.tile_aspect,
+ update->plane_info->tiling_info.gfx8.tile_aspect_c,
+ update->plane_info->tiling_info.gfx8.tile_split,
+ update->plane_info->tiling_info.gfx8.tile_split_c,
+ update->plane_info->tiling_info.gfx8.tile_mode,
+ update->plane_info->tiling_info.gfx8.tile_mode_c);
+
+ SURFACE_TRACE(
+ "plane_info->tiling_info.gfx8.pipe_config = %d;\n"
+ "plane_info->tiling_info.gfx8.array_mode = %d;\n"
+ "plane_info->visible = %d;\n"
+ "plane_info->per_pixel_alpha = %d;\n",
+ update->plane_info->tiling_info.gfx8.pipe_config,
+ update->plane_info->tiling_info.gfx8.array_mode,
+ update->plane_info->visible,
+ update->plane_info->per_pixel_alpha);
+
+ SURFACE_TRACE("surface->tiling_info.gfx9.swizzle = %d;\n",
+ update->plane_info->tiling_info.gfx9.swizzle);
+ }
+
+ if (update->scaling_info) {
+ SURFACE_TRACE(
+ "scaling_info->src_rect.x = %d;\n"
+ "scaling_info->src_rect.y = %d;\n"
+ "scaling_info->src_rect.width = %d;\n"
+ "scaling_info->src_rect.height = %d;\n"
+ "scaling_info->dst_rect.x = %d;\n"
+ "scaling_info->dst_rect.y = %d;\n"
+ "scaling_info->dst_rect.width = %d;\n"
+ "scaling_info->dst_rect.height = %d;\n"
+ "scaling_info->clip_rect.x = %d;\n"
+ "scaling_info->clip_rect.y = %d;\n"
+ "scaling_info->clip_rect.width = %d;\n"
+ "scaling_info->clip_rect.height = %d;\n"
+ "scaling_info->scaling_quality.h_taps = %d;\n"
+ "scaling_info->scaling_quality.v_taps = %d;\n"
+ "scaling_info->scaling_quality.h_taps_c = %d;\n"
+ "scaling_info->scaling_quality.v_taps_c = %d;\n",
+ update->scaling_info->src_rect.x,
+ update->scaling_info->src_rect.y,
+ update->scaling_info->src_rect.width,
+ update->scaling_info->src_rect.height,
+ update->scaling_info->dst_rect.x,
+ update->scaling_info->dst_rect.y,
+ update->scaling_info->dst_rect.width,
+ update->scaling_info->dst_rect.height,
+ update->scaling_info->clip_rect.x,
+ update->scaling_info->clip_rect.y,
+ update->scaling_info->clip_rect.width,
+ update->scaling_info->clip_rect.height,
+ update->scaling_info->scaling_quality.h_taps,
+ update->scaling_info->scaling_quality.v_taps,
+ update->scaling_info->scaling_quality.h_taps_c,
+ update->scaling_info->scaling_quality.v_taps_c);
+ }
+ SURFACE_TRACE("\n");
+ }
+ SURFACE_TRACE("\n");
+}
+
+void post_surface_trace(struct dc *dc)
+{
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ SURFACE_TRACE("post surface process.\n");
+
+}
+
+void context_timing_trace(
+ struct dc *dc,
+ struct resource_context *res_ctx)
+{
+ int i;
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+ int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
+ struct crtc_position position;
+ unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
+
+
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+ /* get_position() returns CRTC vertical/horizontal counter
+ * hence not applicable for underlay pipe
+ */
+ if (pipe_ctx->stream == NULL
+ || pipe_ctx->pipe_idx == underlay_idx)
+ continue;
+
+ pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
+ h_pos[i] = position.horizontal_count;
+ v_pos[i] = position.vertical_count;
+ }
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n",
+ pipe_ctx->stream_res.tg->inst,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ h_pos[i], v_pos[i]);
+ }
+}
+
+void context_clock_trace(
+ struct dc *dc,
+ struct dc_state *context)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ struct dc *core_dc = dc;
+ struct dal_logger *logger = core_dc->ctx->logger;
+
+ CLOCK_TRACE("Current: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
+ "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ context->bw.dcn.calc_clk.dispclk_khz,
+ context->bw.dcn.calc_clk.dppclk_div,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.calc_clk.fclk_khz,
+ context->bw.dcn.calc_clk.dram_ccm_us,
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+ CLOCK_TRACE("Calculated: dispclk_khz:%d dppclk_div:%d dcfclk_khz:%d\n"
+ "dcfclk_deep_sleep_khz:%d fclk_khz:%d\n"
+ "dram_ccm_us:%d min_active_dram_ccm_us:%d\n",
+ context->bw.dcn.calc_clk.dispclk_khz,
+ context->bw.dcn.calc_clk.dppclk_div,
+ context->bw.dcn.calc_clk.dcfclk_khz,
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz,
+ context->bw.dcn.calc_clk.fclk_khz,
+ context->bw.dcn.calc_clk.dram_ccm_us,
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
new file mode 100644
index 000000000000..71993d5983bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "timing_generator.h"
+#include "hw_sequencer.h"
+
+/* used as index in array of black_color_format */
+enum black_color_format {
+ BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0,
+ BLACK_COLOR_FORMAT_RGB_LIMITED,
+ BLACK_COLOR_FORMAT_YUV_TV,
+ BLACK_COLOR_FORMAT_YUV_CV,
+ BLACK_COLOR_FORMAT_YUV_SUPER_AA,
+ BLACK_COLOR_FORMAT_DEBUG,
+};
+
+static const struct tg_color black_color_format[] = {
+ /* BlackColorFormat_RGB_FullRange */
+ {0, 0, 0},
+ /* BlackColorFormat_RGB_Limited */
+ {0x40, 0x40, 0x40},
+ /* BlackColorFormat_YUV_TV */
+ {0x200, 0x40, 0x200},
+ /* BlackColorFormat_YUV_CV */
+ {0x1f4, 0x40, 0x1f4},
+ /* BlackColorFormat_YUV_SuperAA */
+ {0x1a2, 0x20, 0x1a2},
+ /* visual confirm debug */
+ {0xff, 0xff, 0},
+};
+
+void color_space_to_black_color(
+ const struct dc *dc,
+ enum dc_color_space colorspace,
+ struct tg_color *black_color)
+{
+ switch (colorspace) {
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV];
+ break;
+
+ case COLOR_SPACE_SRGB_LIMITED:
+ *black_color =
+ black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
+ break;
+
+ default:
+		/* default is sRGB black (full range) */
+		*black_color =
+			black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE];
+ break;
+ }
+}
+
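+/*
+ * Poll the timing generator for up to 100 msleep(1) iterations waiting for
+ * the CRTC to report blanked; returns false on timeout.
+ */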
+bool hwss_wait_for_blank_complete(
+ struct timing_generator *tg)
+{
+ int counter;
+
+ for (counter = 0; counter < 100; counter++) {
+ if (tg->funcs->is_blanked(tg))
+ break;
+
+ msleep(1);
+ }
+
+ if (counter == 100) {
+ dm_error("DC: failed to blank crtc!\n");
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
new file mode 100644
index 000000000000..e27ed4a45265
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -0,0 +1,2435 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "atom.h"
+#include "dm_helpers.h"
+#include "dc.h"
+#include "grph_object_id.h"
+#include "gpio_service_interface.h"
+#include "core_status.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+
+#include "link_encoder.h"
+#include "hw_sequencer.h"
+#include "resource.h"
+#include "abm.h"
+#include "fixed31_32.h"
+#include "dpcd_defs.h"
+#include "dmcu.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_enum.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define LINK_INFO(...) \
+ dm_logger_write(dc_ctx->logger, LOG_HW_HOTPLUG, \
+ __VA_ARGS__)
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+
+enum {
+ LINK_RATE_REF_FREQ_IN_MHZ = 27,
+ PEAK_FACTOR_X1000 = 1006
+};
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destruct(struct dc_link *link)
+{
+ int i;
+
+ if (link->ddc)
+ dal_ddc_service_destroy(&link->ddc);
+
+ if (link->link_enc)
+ link->link_enc->funcs->destroy(&link->link_enc);
+
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ for (i = 0; i < link->sink_count; ++i)
+ dc_sink_release(link->remote_sinks[i]);
+}
+
+struct gpio *get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service)
+{
+ enum bp_result bp_result;
+ struct graphics_object_hpd_info hpd_info;
+ struct gpio_pin_info pin_info;
+
+ if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
+ return NULL;
+
+ bp_result = dcb->funcs->get_gpio_pin_info(dcb,
+ hpd_info.hpd_int_gpio_uid, &pin_info);
+
+ if (bp_result != BP_RESULT_OK) {
+ ASSERT(bp_result == BP_RESULT_NORECORD);
+ return NULL;
+ }
+
+ return dal_gpio_service_create_irq(
+ gpio_service,
+ pin_info.offset,
+ pin_info.mask);
+}
+
+/*
+ * Function: program_hpd_filter
+ *
+ * @brief
+ * Programs the HPD filter on the HPD line associated with @link, using
+ * connect/disconnect timeouts chosen from the link's connector signal
+ *
+ * @param [in] link: link whose HPD line should be filtered
+ *
+ * @return
+ * true on success, false otherwise
+ */
+static bool program_hpd_filter(
+ const struct dc_link *link)
+{
+ bool result = false;
+
+ struct gpio *hpd;
+
+ int delay_on_connect_in_ms = 0;
+ int delay_on_disconnect_in_ms = 0;
+
+ /* Verify feature is supported */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* Program hpd filter */
+ delay_on_connect_in_ms = 500;
+ delay_on_disconnect_in_ms = 100;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* Program hpd filter to allow DP signal to settle */
+ /* 500: not able to detect MST <-> SST switch as HPD is low for
+ * only 100ms on DELL U2413
+ * 0: some passive dongles still show aux mode instead of i2c
+ * 20-50: not enough to hide bouncing HPD with a passive dongle;
+ * also see intermittent i2c read issues.
+ */
+ delay_on_connect_in_ms = 80;
+ delay_on_disconnect_in_ms = 0;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+ default:
+ /* Don't program hpd filter */
+ return false;
+ }
+
+ /* Obtain HPD handle */
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (!hpd)
+ return result;
+
+ /* Setup HPD filtering */
+ if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+ struct gpio_hpd_config config;
+
+ config.delay_on_connect = delay_on_connect_in_ms;
+ config.delay_on_disconnect = delay_on_disconnect_in_ms;
+
+ dal_irq_setup_hpd_filter(hpd, &config);
+
+ dal_gpio_close(hpd);
+
+ result = true;
+ } else {
+ ASSERT_CRITICAL(false);
+ }
+
+ /* Release HPD handle */
+ dal_gpio_destroy_irq(&hpd);
+
+ return result;
+}
+
+static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+{
+ uint32_t is_hpd_high = 0;
+ struct gpio *hpd_pin;
+
+ /* todo: may need to lock gpio access */
+ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ if (hpd_pin == NULL)
+ goto hpd_gpio_failure;
+
+ dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
+ dal_gpio_get_value(hpd_pin, &is_hpd_high);
+ dal_gpio_close(hpd_pin);
+ dal_gpio_destroy_irq(&hpd_pin);
+
+ if (is_hpd_high) {
+ *type = dc_connection_single;
+ /* TODO: need to do the actual detection */
+ } else {
+ *type = dc_connection_none;
+ }
+
+ return true;
+
+hpd_gpio_failure:
+ return false;
+}
+
+static enum ddc_transaction_type get_ddc_transaction_type(
+ enum signal_type sink_signal)
+{
+ enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
+
+ switch (sink_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_RGB:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* MST does not use I2C-over-AUX, but there is the
+ * SPECIAL use case for "immediate downstream device
+ * access" (EPR#370830). */
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ default:
+ break;
+ }
+
+ return transaction_type;
+}
+
+static enum signal_type get_basic_signal_type(
+ struct graphics_object_id encoder,
+ struct graphics_object_id downstream)
+{
+ if (downstream.type == OBJECT_TYPE_CONNECTOR) {
+ switch (downstream.id) {
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ {
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ }
+ }
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ case CONNECTOR_ID_VGA:
+ return SIGNAL_TYPE_RGB;
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ return SIGNAL_TYPE_HDMI_TYPE_A;
+ case CONNECTOR_ID_LVDS:
+ return SIGNAL_TYPE_LVDS;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ case CONNECTOR_ID_EDP:
+ return SIGNAL_TYPE_EDP;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ } else if (downstream.type == OBJECT_TYPE_ENCODER) {
+ switch (downstream.id) {
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ }
+
+ return SIGNAL_TYPE_NONE;
+}
+
+/*
+ * @brief
+ * Check whether there is a dongle on DP connector
+ */
+static bool is_dp_sink_present(struct dc_link *link)
+{
+ enum gpio_result gpio_result;
+ uint32_t clock_pin = 0;
+
+ struct ddc *ddc;
+
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(link->link_id);
+
+ bool present =
+ ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+ (connector_id == CONNECTOR_ID_EDP));
+
+ ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return present;
+ }
+
+ /* Open GPIO and set it to I2C mode */
+ /* Note: this GpioMode_Input will be converted
+ * to GpioConfigType_I2cAuxDualMode in GPIO component,
+ * which indicates we need additional delay */
+
+ if (GPIO_RESULT_OK != dal_ddc_open(
+ ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+ dal_gpio_destroy_ddc(&ddc);
+
+ return present;
+ }
+
+ /* Read GPIO: DP sink is present if both clock and data pins are zero */
+ /* [anaumov] in DAL2, there was no check for GPIO failure */
+
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+ ASSERT(gpio_result == GPIO_RESULT_OK);
+
+ present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
+
+ dal_ddc_close(ddc);
+
+ return present;
+}
+
+/*
+ * @brief
+ * Detect output sink type
+ */
+static enum signal_type link_detect_sink(
+ struct dc_link *link,
+ enum dc_detect_reason reason)
+{
+ enum signal_type result = get_basic_signal_type(
+ link->link_enc->id, link->link_id);
+
+ /* Internal digital encoder will detect only dongles
+ * that require digital signal */
+
+ /* Detection mechanism is different
+ * for different native connectors.
+ * LVDS connector supports only LVDS signal;
+ * PCIE is a bus slot, the actual connector needs to be detected first;
+ * eDP connector supports only eDP signal;
+ * HDMI should check straps for audio */
+
+ /* PCIE detects the actual connector on add-on board */
+
+ if (link->link_id.id == CONNECTOR_ID_PCIE) {
+ /* ZAZTODO implement PCIE add-on card detection */
+ }
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A: {
+ /* check audio support:
+ * if native HDMI is not supported, switch to DVI */
+ struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+
+ if (!aud_support->hdmi_audio_native)
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT: {
+ /* DP HPD short pulse. Passive DP dongle will not
+ * have short pulse
+ */
+ if (reason != DETECT_REASON_HPDRX) {
+ /* Check whether DP signal detected: if not -
+ * we assume signal is DVI; it could be corrected
+ * to HDMI after dongle detection
+ */
+ if (!is_dp_sink_present(link))
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static enum signal_type decide_signal_from_strap_and_dongle_type(
+ enum display_dongle_type dongle_type,
+ struct audio_support *audio_support)
+{
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+
+ switch (dongle_type) {
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ if (audio_support->hdmi_audio_on_dongle)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ if (audio_support->hdmi_audio_native)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ default:
+ signal = SIGNAL_TYPE_NONE;
+ break;
+ }
+
+ return signal;
+}
+
+static enum signal_type dp_passive_dongle_detection(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap,
+ struct audio_support *audio_support)
+{
+ dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+ ddc, sink_cap);
+ return decide_signal_from_strap_and_dongle_type(
+ sink_cap->dongle_type,
+ audio_support);
+}
+
+static void link_disconnect_sink(struct dc_link *link)
+{
+ if (link->local_sink) {
+ dc_sink_release(link->local_sink);
+ link->local_sink = NULL;
+ }
+
+ link->dpcd_sink_count = 0;
+}
+
+static void detect_dp(
+ struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+ bool *converter_disable_audio,
+ struct audio_support *audio_support,
+ enum dc_detect_reason reason)
+{
+ bool boot = false;
+ sink_caps->signal = link_detect_sink(link, reason);
+ sink_caps->transaction_type =
+ get_ddc_transaction_type(sink_caps->signal);
+
+ if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+ detect_dp_sink_caps(link);
+
+ if (is_mst_supported(link)) {
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ link->type = dc_connection_mst_branch;
+
+ /*
+ * This call initiates MST topology discovery, which
+ * detects MST ports, adds new DRM connectors to the DRM
+ * framework and then reads the EDID via remote i2c over
+ * aux. In the end it notifies DRM of the detect result
+ * and saves the EDID into the DRM framework.
+ *
+ * .detect is called by .fill_modes.
+ * .fill_modes is called by user mode ioctl
+ * DRM_IOCTL_MODE_GETCONNECTOR.
+ *
+ * .get_modes is called by .fill_modes.
+ *
+ * When .get_modes is called, the AMDGPU DM implementation
+ * creates a new dc_sink and adds it to the dc_link. For
+ * long HPD plug in/out, MST has its own handler.
+ *
+ * Therefore, just after dc_create, link->sink is not
+ * created for MST until the user mode app calls
+ * DRM_IOCTL_MODE_GETCONNECTOR.
+ *
+ * Need to check ->sink usages in case ->sink == NULL
+ * TODO: s3 resume check
+ */
+ if (reason == DETECT_REASON_BOOT)
+ boot = true;
+
+ if (!dm_helpers_dp_mst_start_top_mgr(
+ link->ctx,
+ link, boot)) {
+ /* MST not supported */
+ link->type = dc_connection_single;
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+ }
+ }
+
+ if (link->type != dc_connection_mst_branch &&
+ is_dp_active_dongle(link)) {
+ /* DP active dongles */
+ link->type = dc_connection_active_dongle;
+ if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+ /*
+ * active dongle unplug processing for short irq
+ */
+ link_disconnect_sink(link);
+ return;
+ }
+
+ if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ *converter_disable_audio = true;
+ }
+ } else {
+ /* DP passive dongles */
+ sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+ sink_caps,
+ audio_support);
+ }
+}
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ uint8_t i;
+ bool converter_disable_audio = false;
+ struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct dc_sink *sink = NULL;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+ return false;
+
+ if (false == detect_sink(link, &new_connection_type)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP &&
+ link->local_sink)
+ return true;
+
+ link_disconnect_sink(link);
+
+ if (new_connection_type != dc_connection_none) {
+ link->type = new_connection_type;
+
+ /* From Disconnected-to-Connected. */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ if (aud_support->hdmi_audio_native)
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ detect_edp_sink_caps(link);
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+ detect_dp(
+ link,
+ &sink_caps,
+ &converter_disable_audio,
+ aud_support, reason);
+
+ /* Active dongle downstream unplug */
+ if (link->type == dc_connection_active_dongle
+ && link->dpcd_caps.sink_count.
+ bits.SINK_COUNT == 0)
+ return true;
+
+ if (link->type == dc_connection_mst_branch) {
+ LINK_INFO("link=%d, mst branch is now Connected\n",
+ link->link_index);
+ /* Need to set up the mst link_cap struct here,
+ * otherwise dc_link_detect() will leave mst link_cap
+ * empty, which leads to allocate_mst_payload() computing
+ * a "0" pbn_per_slot value and an exception in
+ * dal_fixed31_32_div()
+ */
+ link->verified_link_cap = link->reported_link_cap;
+ return false;
+ }
+
+ break;
+ }
+
+ default:
+ DC_ERROR("Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return false;
+ } /* switch() */
+
+ if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
+ link->dpcd_sink_count = link->dpcd_caps.sink_count.
+ bits.SINK_COUNT;
+ else
+ link->dpcd_sink_count = 1;
+
+ dal_ddc_service_set_transaction_type(
+ link->ddc,
+ sink_caps.transaction_type);
+
+ link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode(
+ link->ddc);
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DC_ERROR("Failed to create sink!\n");
+ return false;
+ }
+
+ sink->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
+ sink->converter_disable_audio = converter_disable_audio;
+
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ switch (edid_status) {
+ case EDID_BAD_CHECKSUM:
+ dm_logger_write(link->ctx->logger, LOG_ERROR,
+ "EDID checksum invalid.\n");
+ break;
+ case EDID_NO_RESPONSE:
+ dm_logger_write(link->ctx->logger, LOG_ERROR,
+ "No EDID read.\n");
+ return false;
+
+ default:
+ break;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ sink_caps.transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ /*
+ * TODO debug why Dell 2413 doesn't like
+ * two link trainings
+ */
+
+ /* deal with non-mst cases */
+ dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+ }
+
+ /* HDMI-DVI Dongle */
+ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+ !sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
+ /* Connectivity log: detection */
+ for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
+ CONN_DATA_DETECT(link,
+ &sink->dc_edid.raw_edid[i * EDID_BLOCK_SIZE],
+ EDID_BLOCK_SIZE,
+ "%s: [Block %d] ", sink->edid_caps.display_name, i);
+ }
+
+ dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+ "%s: "
+ "manufacturer_id = %X, "
+ "product_id = %X, "
+ "serial_number = %X, "
+ "manufacture_week = %d, "
+ "manufacture_year = %d, "
+ "display_name = %s, "
+ "speaker_flag = %d, "
+ "audio_mode_count = %d\n",
+ __func__,
+ sink->edid_caps.manufacturer_id,
+ sink->edid_caps.product_id,
+ sink->edid_caps.serial_number,
+ sink->edid_caps.manufacture_week,
+ sink->edid_caps.manufacture_year,
+ sink->edid_caps.display_name,
+ sink->edid_caps.speaker_flags,
+ sink->edid_caps.audio_mode_count);
+
+ for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
+ dm_logger_write(link->ctx->logger, LOG_DETECTION_EDID_PARSER,
+ "%s: mode number = %d, "
+ "format_code = %d, "
+ "channel_count = %d, "
+ "sample_rate = %d, "
+ "sample_size = %d\n",
+ __func__,
+ i,
+ sink->edid_caps.audio_modes[i].format_code,
+ sink->edid_caps.audio_modes[i].channel_count,
+ sink->edid_caps.audio_modes[i].sample_rate,
+ sink->edid_caps.audio_modes[i].sample_size);
+ }
+
+ } else {
+ /* From Connected-to-Disconnected. */
+ if (link->type == dc_connection_mst_branch) {
+ LINK_INFO("link=%d, mst branch is now Disconnected\n",
+ link->link_index);
+
+ dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
+
+ link->mst_stream_alloc_table.stream_count = 0;
+ memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
+ }
+
+ link->type = dc_connection_none;
+ sink_caps.signal = SIGNAL_TYPE_NONE;
+ }
+
+ LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
+ link->link_index, sink,
+ (sink_caps.signal == SIGNAL_TYPE_NONE ?
+ "Disconnected":"Connected"));
+
+ return true;
+}
+
+static enum hpd_source_id get_hpd_line(
+ struct dc_link *link)
+{
+ struct gpio *hpd;
+ enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
+
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (hpd) {
+ switch (dal_irq_get_source(hpd)) {
+ case DC_IRQ_SOURCE_HPD1:
+ hpd_id = HPD_SOURCEID1;
+ break;
+ case DC_IRQ_SOURCE_HPD2:
+ hpd_id = HPD_SOURCEID2;
+ break;
+ case DC_IRQ_SOURCE_HPD3:
+ hpd_id = HPD_SOURCEID3;
+ break;
+ case DC_IRQ_SOURCE_HPD4:
+ hpd_id = HPD_SOURCEID4;
+ break;
+ case DC_IRQ_SOURCE_HPD5:
+ hpd_id = HPD_SOURCEID5;
+ break;
+ case DC_IRQ_SOURCE_HPD6:
+ hpd_id = HPD_SOURCEID6;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ dal_gpio_destroy_irq(&hpd);
+ }
+
+ return hpd_id;
+}
+
+static enum channel_id get_ddc_line(struct dc_link *link)
+{
+ struct ddc *ddc;
+ enum channel_id channel = CHANNEL_ID_UNKNOWN;
+
+ ddc = dal_ddc_service_get_ddc_pin(link->ddc);
+
+ if (ddc) {
+ switch (dal_ddc_get_line(ddc)) {
+ case GPIO_DDC_LINE_DDC1:
+ channel = CHANNEL_ID_DDC1;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ channel = CHANNEL_ID_DDC2;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ channel = CHANNEL_ID_DDC3;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ channel = CHANNEL_ID_DDC4;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ channel = CHANNEL_ID_DDC5;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ channel = CHANNEL_ID_DDC6;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ channel = CHANNEL_ID_DDC_VGA;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ channel = CHANNEL_ID_I2C_PAD;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+ }
+
+ return channel;
+}
+
+static enum transmitter translate_encoder_to_transmitter(
+ struct graphics_object_id encoder)
+{
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_A;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_B;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_C;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_D;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_E;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_F;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_G;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_NUTMEG_CRT;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_TRAVIS_CRT;
+ case ENUM_ID_2:
+ return TRANSMITTER_TRAVIS_LCD;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+}
+
+static bool construct(
+ struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ uint8_t i;
+ struct gpio *hpd_gpio = NULL;
+ struct ddc_service_init_data ddc_service_init_data = { { 0 } };
+ struct dc_context *dc_ctx = init_params->ctx;
+ struct encoder_init_data enc_init_data = { 0 };
+ struct integrated_info info = {{{ 0 }}};
+ struct dc_bios *bios = init_params->dc->ctx->dc_bios;
+ const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ link->dc = init_params->dc;
+ link->ctx = dc_ctx;
+ link->link_index = init_params->link_index;
+
+ link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+
+ if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+ dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
+ __func__, init_params->connector_index);
+ goto create_fail;
+ }
+
+ hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (hpd_gpio != NULL)
+ link->irq_source_hpd = dal_irq_get_source(hpd_gpio);
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+ if (hpd_gpio != NULL)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(hpd_gpio);
+
+ break;
+ case CONNECTOR_ID_EDP:
+ link->connector_signal = SIGNAL_TYPE_EDP;
+
+ if (hpd_gpio != NULL) {
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(hpd_gpio);
+ }
+ break;
+ default:
+ dm_logger_write(dc_ctx->logger, LOG_WARNING,
+ "Unsupported Connector type:%d!\n", link->link_id.id);
+ goto create_fail;
+ }
+
+ if (hpd_gpio != NULL) {
+ dal_gpio_destroy_irq(&hpd_gpio);
+ hpd_gpio = NULL;
+ }
+
+ /* TODO: #DAL3 Implement id to str function.*/
+ LINK_INFO("Connector[%d] description:"
+ "signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
+
+ ddc_service_init_data.ctx = link->ctx;
+ ddc_service_init_data.id = link->link_id;
+ ddc_service_init_data.link = link;
+ link->ddc = dal_ddc_service_create(&ddc_service_init_data);
+
+ if (link->ddc == NULL) {
+ DC_ERROR("Failed to create ddc_service!\n");
+ goto ddc_create_fail;
+ }
+
+ link->ddc_hw_inst =
+ dal_ddc_get_line(
+ dal_ddc_service_get_ddc_pin(link->ddc));
+
+ enc_init_data.ctx = dc_ctx;
+ bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+ enc_init_data.connector = link->link_id;
+ enc_init_data.channel = get_ddc_line(link);
+ enc_init_data.hpd_source = get_hpd_line(link);
+
+ link->hpd_src = enc_init_data.hpd_source;
+
+ enc_init_data.transmitter =
+ translate_encoder_to_transmitter(enc_init_data.encoder);
+ link->link_enc = link->dc->res_pool->funcs->link_enc_create(
+ &enc_init_data);
+
+ if (link->link_enc == NULL) {
+ DC_ERROR("Failed to create link encoder!\n");
+ goto link_enc_create_fail;
+ }
+
+ link->link_enc_hw_inst = link->link_enc->transmitter;
+
+ for (i = 0; i < 4; i++) {
+ if (BP_RESULT_OK !=
+ bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+ DC_ERROR("Failed to find device tag!\n");
+ goto device_tag_fail;
+ }
+
+ /* Look for a device tag that matches the connector signal:
+ * CRT for RGB, LCD for other supported signal types
+ */
+ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
+ && link->connector_signal != SIGNAL_TYPE_RGB)
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
+ && link->connector_signal == SIGNAL_TYPE_RGB)
+ continue;
+ break;
+ }
+
+ if (bios->integrated_info)
+ info = *bios->integrated_info;
+
+ /* Look for channel mapping corresponding to connector and device tag */
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+ struct external_display_path *path =
+ &info.ext_disp_conn_info.path[i];
+ if (path->device_connector_id.enum_id == link->link_id.enum_id
+ && path->device_connector_id.id == link->link_id.id
+ && path->device_connector_id.type == link->link_id.type) {
+
+ if (link->device_tag.acpi_device != 0
+ && path->device_acpi_enum == link->device_tag.acpi_device) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ } else if (path->device_tag ==
+ link->device_tag.dev_id.raw_device_tag) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ }
+ break;
+ }
+ }
+
+ /*
+ * TODO check if GPIO programmed correctly
+ *
+ * If GPIO isn't programmed correctly HPD might not rise or drain
+ * fast enough, leading to bounces.
+ */
+ program_hpd_filter(link);
+
+ return true;
+device_tag_fail:
+ link->link_enc->funcs->destroy(&link->link_enc);
+link_enc_create_fail:
+ dal_ddc_service_destroy(&link->ddc);
+ddc_create_fail:
+create_fail:
+
+ if (hpd_gpio != NULL) {
+ dal_gpio_destroy_irq(&hpd_gpio);
+ }
+
+ return false;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+struct dc_link *link_create(const struct link_init_data *init_params)
+{
+ struct dc_link *link =
+ kzalloc(sizeof(*link), GFP_KERNEL);
+
+ if (NULL == link)
+ goto alloc_fail;
+
+ if (false == construct(link, init_params))
+ goto construct_fail;
+
+ return link;
+
+construct_fail:
+ kfree(link);
+
+alloc_fail:
+ return NULL;
+}
+
+void link_destroy(struct dc_link **link)
+{
+ destruct(*link);
+ kfree(*link);
+ *link = NULL;
+}
+
+static void dpcd_configure_panel_mode(
+ struct dc_link *link,
+ enum dp_panel_mode panel_mode)
+{
+ union dpcd_edp_config edp_config_set;
+ bool panel_mode_edp = false;
+
+ memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
+
+ if (DP_PANEL_MODE_DEFAULT != panel_mode) {
+
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ case DP_PANEL_MODE_SPECIAL:
+ panel_mode_edp = true;
+ break;
+
+ default:
+ break;
+ }
+
+ /*set edp panel mode in receiver*/
+ core_link_read_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ if (edp_config_set.bits.PANEL_MODE_EDP
+ != panel_mode_edp) {
+ enum ddc_result result = DDC_RESULT_UNKNOWN;
+
+ edp_config_set.bits.PANEL_MODE_EDP =
+ panel_mode_edp;
+ result = core_link_write_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ ASSERT(result == DDC_RESULT_SUCESSFULL);
+ }
+ }
+ dm_logger_write(link->ctx->logger, LOG_DETECTION_DP_CAPS,
+ "Link: %d eDP panel mode supported: %d "
+ "eDP panel mode enabled: %d \n",
+ link->link_index,
+ link->dpcd_caps.panel_mode_edp,
+ panel_mode_edp);
+}
+
+static void enable_stream_features(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ union down_spread_ctrl downspread;
+
+ core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+}
+
+static enum dc_status enable_link_dp(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_status status;
+ bool skip_video_pattern;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link_settings link_settings = {0};
+ enum dp_panel_mode panel_mode;
+ enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
+
+ /* get link settings for video mode timing */
+ decide_link_settings(stream, &link_settings);
+
+ /* raise clock state for HBR3 if required. Confirmed with HW:
+ * the DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the
+ * VDDC rail
+ */
+ if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_rate = LINK_RATE_HIGH3;
+
+ if (link_settings.link_rate == max_link_rate) {
+ if (state->dis_clk->funcs->set_min_clocks_state) {
+ if (state->dis_clk->cur_min_clks_state < DM_PP_CLOCKS_STATE_NOMINAL)
+ state->dis_clk->funcs->set_min_clocks_state(
+ state->dis_clk, DM_PP_CLOCKS_STATE_NOMINAL);
+ } else {
+ uint32_t dp_phyclk_in_khz;
+ const struct clocks_value clocks_value =
+ state->dis_clk->cur_clocks_value;
+
+ /* 27 MHz = 27,000,000 Hz = 27,000 kHz */
+ dp_phyclk_in_khz = link_settings.link_rate * 27000;
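+ /* e.g. HBR2 (LINK_RATE_HIGH2 = 0x14 = 20) gives
+ * 20 * 27000 = 540,000 kHz = 540 MHz PHY clock
+ */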
+
+ if (((clocks_value.max_non_dp_phyclk_in_khz != 0) &&
+ (dp_phyclk_in_khz > clocks_value.max_non_dp_phyclk_in_khz)) ||
+ (dp_phyclk_in_khz > clocks_value.max_dp_phyclk_in_khz)) {
+ state->dis_clk->funcs->apply_clock_voltage_request(
+ state->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ dp_phyclk_in_khz,
+ false,
+ true);
+ }
+ }
+ }
+
+ dp_enable_link_phy(
+ link,
+ pipe_ctx->stream->signal,
+ pipe_ctx->clock_source->id,
+ &link_settings);
+
+ panel_mode = dp_get_panel_mode(link);
+ dpcd_configure_panel_mode(link, panel_mode);
+
+ skip_video_pattern = true;
+
+ if (link_settings.link_rate == LINK_RATE_LOW)
+ skip_video_pattern = false;
+
+ if (perform_link_training_with_retries(
+ link,
+ &link_settings,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS)) {
+ link->cur_link_settings = link_settings;
+ status = DC_OK;
+ } else
+ status = DC_FAIL_DP_LINK_TRAINING;
+
+ enable_stream_features(pipe_ctx);
+
+ return status;
+}
+
+static enum dc_status enable_link_dp_mst(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->sink->link;
+
+ /* The sink signal type after an MST branch is MST. Multiple MST
+ * sinks share one link, so the link DP PHY is enabled and trained
+ * only once.
+ */
+ if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
+ return DC_OK;
+
+ /* set the sink to MST mode before enabling the link */
+ dp_enable_mst_on_sink(link, true);
+
+ return enable_link_dp(state, pipe_ctx);
+}
+
+static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
+ enum engine_id eng_id,
+ struct ext_hdmi_settings *settings)
+{
+ bool result = false;
+ int i = 0;
+ struct integrated_info *integrated_info =
+ pipe_ctx->stream->ctx->dc_bios->integrated_info;
+
+ if (integrated_info == NULL)
+ return false;
+
+ /*
+ * Get retimer settings from sbios for passing the SI eye test on DCE11.
+ * The setting values vary based on board revision and port id,
+ * therefore the setting values for each port are passed by sbios.
+ */
+
+ // Check if current bios contains ext Hdmi settings
+ if (integrated_info->gpu_cap_info & 0x20) {
+ switch (eng_id) {
+ case ENGINE_ID_DIGA:
+ settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp0_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp0_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp1_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp1_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp2_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp2_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp3_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp3_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ if (result == true) {
+ // Validate settings from bios integrated info table
+ if (settings->slv_addr == 0)
+ return false;
+ if (settings->reg_num > 9)
+ return false;
+ if (settings->reg_num_6g > 3)
+ return false;
+
+ for (i = 0; i < settings->reg_num; i++) {
+ if (settings->reg_settings[i].i2c_reg_index > 0x20)
+ return false;
+ }
+
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
+ return false;
+ }
+ }
+ }
+
+ return result;
+}
+
+static bool i2c_write(struct pipe_ctx *pipe_ctx,
+ uint8_t address, uint8_t *buffer, uint32_t length)
+{
+ struct i2c_command cmd = {0};
+ struct i2c_payload payload = {0};
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.number_of_payloads = 1;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;
+
+ payload.address = address;
+ payload.data = buffer;
+ payload.length = length;
+ payload.write = true;
+ cmd.payloads = &payload;
+
+ if (dc_submit_i2c(pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream->sink->link->link_index, &cmd))
+ return true;
+
+ return false;
+}
+
+static void write_i2c_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz,
+ struct ext_hdmi_settings *settings)
+{
+ uint8_t slave_address = (settings->slv_addr >> 1);
+ uint8_t buffer[2];
+ const uint8_t apply_rx_tx_change = 0x4;
+ uint8_t offset = 0xA;
+ uint8_t value = 0;
+ int i = 0;
+ bool i2c_success = false;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Start Ext-Hdmi programming */
+
+ for (i = 0; i < settings->reg_num; i++) {
+ /* Apply 3G settings */
+ if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings[i].i2c_reg_val;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA ||
+ settings->reg_settings[i].i2c_reg_index == 0xB ||
+ settings->reg_settings[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings[i].i2c_reg_val;
+ else {
+ i2c_success =
+ dal_ddc_service_query_ddc_data(
+ pipe_ctx->stream->sink->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+ }
+ }
+
+ /* Apply 6G settings for pixel clocks above 340 MHz */
+ if (is_over_340mhz) {
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ /* Apply 6G settings */
+ if (settings->reg_settings_6g[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings_6g[i].i2c_reg_val;
+ else {
+ i2c_success =
+ dal_ddc_service_query_ddc_data(
+ pipe_ctx->stream->sink->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+ }
+ }
+ }
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ }
+}
+
+static void write_i2c_default_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xBA >> 1);
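+ /* 0xBA is assumed to be the retimer's 8-bit write address;
+ * shifting right by one gives the 7-bit I2C address (0x5D) */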
+ uint8_t buffer[2];
+ bool i2c_success = false;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Program Slave Address for tuning signal integrity */
+ /* Write offset 0x0A to 0x13 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x13;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0B to 0xDA or 0xD8 */
+ buffer[0] = 0x0B;
+ buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0C to 0x1D or 0x91 */
+ buffer[0] = 0x0C;
+ buffer[1] = is_over_340mhz ? 0x1D : 0x91;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+ }
+}
+
+static void write_i2c_redriver_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xF0 >> 1);
+ uint8_t buffer[16];
+ bool i2c_success = false;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ // Program Slave Address for tuning signal integrity
+ buffer[3] = 0x4E;
+ buffer[4] = 0x4E;
+ buffer[5] = 0x4E;
+ buffer[6] = is_over_340mhz ? 0x4E : 0x4A;
+
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+ /* Write failure */
+ ASSERT(i2c_success);
+}
+
+static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ enum dc_color_depth display_color_depth;
+ enum engine_id eng_id;
+ struct ext_hdmi_settings settings = {0};
+ bool is_over_340mhz = false;
+ bool is_vga_mode = (stream->timing.h_addressable == 640)
+ && (stream->timing.v_addressable == 480);
+
+ if (stream->phy_pix_clk > 340000)
+ is_over_340mhz = true;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ unsigned short masked_chip_caps = pipe_ctx->stream->sink->link->chip_caps &
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ /* DP159, Retimer settings */
+ eng_id = pipe_ctx->stream_res.stream_enc->id;
+
+ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) {
+ write_i2c_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz, &settings);
+ } else {
+ write_i2c_default_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz);
+ }
+ } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ /* PI3EQX1204, Redriver settings */
+ write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
+ }
+ }
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ dal_ddc_service_write_scdc_data(
+ stream->sink->link->ddc,
+ stream->phy_pix_clk,
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
+ memset(&stream->sink->link->cur_link_settings, 0,
+ sizeof(struct dc_link_settings));
+
+ display_color_depth = stream->timing.display_color_depth;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ display_color_depth = COLOR_DEPTH_888;
+
+ link->link_enc->funcs->enable_tmds_output(
+ link->link_enc,
+ pipe_ctx->clock_source->id,
+ display_color_depth,
+ pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+ stream->phy_pix_clk);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ dal_ddc_service_read_scdc_data(link->ddc);
+}
+
+/****************************enable_link***********************************/
+static enum dc_status enable_link(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ switch (pipe_ctx->stream->signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ status = enable_link_dp(state, pipe_ctx);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ status = enable_link_dp_mst(state, pipe_ctx);
+ msleep(200);
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ enable_link_hdmi(pipe_ctx);
+ status = DC_OK;
+ break;
+ case SIGNAL_TYPE_VIRTUAL:
+ status = DC_OK;
+ break;
+ default:
+ break;
+ }
+
+ if (pipe_ctx->stream_res.audio && status == DC_OK) {
+ /* notify audio driver for audio modes of monitor */
+ pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
+
+ /* un-mute audio */
+ /* TODO: audio should be per stream rather than per link */
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, false);
+ }
+
+ return status;
+}
+
+static void disable_link(struct dc_link *link, enum signal_type signal)
+{
+ /*
+ * TODO: implement call for dp_set_hw_test_pattern
+ * it is needed for compliance testing
+ */
+
+ /* Here we need to specify that encoder output settings
+ * need to be calculated as for the set mode; this leads to
+ * querying dynamic link capabilities, which should be done
+ * before enabling the output */
+
+ if (dc_is_dp_signal(signal)) {
+ /* SST DP, eDP */
+ if (dc_is_dp_sst_signal(signal))
+ dp_disable_link_phy(link, signal);
+ else
+ dp_disable_link_phy_mst(link, signal);
+ } else
+ link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+}
+
+bool dp_active_dongle_validate_timing(
+ const struct dc_crtc_timing *timing,
+ const struct dc_dongle_caps *dongle_caps)
+{
+ unsigned int required_pix_clk = timing->pix_clk_khz;
+
+ if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ dongle_caps->extendedCapValid == false)
+ return true;
+
+ /* Check Pixel Encoding */
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+ return false;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+ return false;
+ break;
+ default:
+ /* Invalid Pixel Encoding*/
+ return false;
+ }
+
+
+ /* Check Color Depth and Pixel Clock */
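+ /* YCbCr 4:2:0 presumably halves the effective TMDS rate on the
+ * HDMI side of the converter, hence the pixel clock adjustment below */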
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ required_pix_clk /= 2;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ /*888 and 666 should always be supported*/
+ break;
+ case COLOR_DEPTH_101010:
+ if (dongle_caps->dp_hdmi_max_bpc < 10)
+ return false;
+ required_pix_clk = required_pix_clk * 10 / 8;
+ break;
+ case COLOR_DEPTH_121212:
+ if (dongle_caps->dp_hdmi_max_bpc < 12)
+ return false;
+ required_pix_clk = required_pix_clk * 12 / 8;
+ break;
+
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* These color depths are currently not supported */
+ return false;
+ }
+
+ if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk)
+ return false;
+
+ return true;
+}
+
+enum dc_status dc_link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+ struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
+
+ /* A hack to avoid failing any modes for EDID override feature on
+ * topology change such as lower quality cable for DP or different dongle
+ */
+ if (link->remote_sinks[0])
+ return DC_OK;
+
+ /* Passive Dongle */
+ if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
+ return DC_EXCEED_DONGLE_CAP;
+
+ /* Active Dongle*/
+ if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+ return DC_EXCEED_DONGLE_CAP;
+
+ switch (stream->signal) {
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ if (!dp_validate_mode_timing(
+ link,
+ timing))
+ return DC_NO_DP_LINK_BANDWIDTH;
+ break;
+
+ default:
+ break;
+ }
+
+ return DC_OK;
+}
+
+
+bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct abm *abm = core_dc->res_pool->abm;
+ unsigned int controller_id = 0;
+ int i;
+
+ if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+ return false;
+
+ dm_logger_write(link->ctx->logger, LOG_BACKLIGHT,
+ "New Backlight level: %d (0x%X)\n", level, level);
+
+ if (dc_is_embedded_signal(link->connector_signal)) {
+ if (stream != NULL) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream
+ == stream)
+ /* The DMCU subtracts 1 from all controller id
+ * values, therefore +1 here
+ */
+ controller_id =
+ core_dc->current_state->
+ res_ctx.pipe_ctx[i].stream_res.tg->inst +
+ 1;
+ }
+ }
+ abm->funcs->set_backlight_level(
+ abm,
+ level,
+ frame_ramp,
+ controller_id);
+ }
+
+ return true;
+}
+
+bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ if (dmcu != NULL && link->psr_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, enable, wait);
+
+ return true;
+}
+
+bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
+ if (dmcu != NULL && link->psr_enabled)
+ dmcu->funcs->get_psr_state(dmcu, psr_state);
+
+ return true;
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int i;
+
+ psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (link != NULL &&
+ dmcu != NULL) {
+ /* updateSinkPsrDpcdConfig*/
+ union dpcd_psr_configuration psr_configuration;
+
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+ psr_configuration.bits.ENABLE = 1;
+ psr_configuration.bits.CRC_VERIFICATION = 1;
+ psr_configuration.bits.FRAME_CAPTURE_INDICATION =
+ psr_config->psr_frame_capture_indication_req;
+
+ /* Check for PSR v2*/
+ if (psr_config->psr_version == 0x2) {
+ /* For PSR v2 selective update.
+ * Indicates whether sink should start capturing
+ * immediately following active scan line,
+ * or starting with the 2nd active scan line.
+ */
+ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+ /*For PSR v2, determines whether Sink should generate
+ * IRQ_HPD when CRC mismatch is detected.
+ */
+ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 368,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+ psr_context->transmitterId = link->link_enc->transmitter;
+ psr_context->engineId = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* The dmcu subtracts 1 from all controller id values,
+ * therefore +1 here
+ */
+ psr_context->controllerId =
+ core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
+ psr_context->phyType = PHY_TYPE_UNIPHY;
+ /*PhyId is associated with the transmitter id*/
+ psr_context->smuPhyId = link->link_enc->transmitter;
+
+ psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+ psr_context->vsyncRateHz = div64_u64(div64_u64((stream->
+ timing.pix_clk_khz * 1000),
+ stream->timing.v_total),
+ stream->timing.h_total);
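+ /* e.g. a hypothetical 148,500 kHz pixel clock with v_total 1125
+ * and h_total 2200 gives 148,500,000 / 1125 / 2200 = 60 Hz
+ */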
+
+ psr_context->psrSupportedDisplayConfig = true;
+ psr_context->psrExitLinkTrainingRequired =
+ psr_config->psr_exit_link_training_required;
+ psr_context->sdpTransmitLineNumDeadline =
+ psr_config->psr_sdp_transmit_line_num_deadline;
+ psr_context->psrFrameCaptureIndicationReq =
+ psr_config->psr_frame_capture_indication_req;
+
+ psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+ psr_context->numberOfControllers =
+ link->dc->res_pool->res_cap->num_timing_generator;
+
+ psr_context->rfb_update_auto_en = true;
+
+ /* 2 frames before enter PSR. */
+ psr_context->timehyst_frames = 2;
+ /* half a frame
+ * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+ */
+ psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
+ psr_context->aux_repeats = 10;
+
+ psr_context->psr_level.u32all = 0;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /*skip power down the single pipe since it blocks the cstate*/
+ if (ASIC_REV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev))
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+#endif
+
+ /* SMU will perform additional powerdown sequence.
+ * For unsupported ASICs, set psr_level flag to skip PSR
+ * static screen notification to SMU.
+ * (Always set for DAL2, did not check ASIC)
+ */
+ psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION = 1;
+
+ /* Complete PSR entry before aborting to prevent intermittent
+ * freezes on certain eDPs
+ */
+ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+ /* Controls additional delay after remote frame capture before
+ * continuing power down, default = 0
+ */
+ psr_context->frame_delay = 0;
+
+ link->psr_enabled = true;
+ dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ return true;
+ } else
+ return false;
+
+}
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
+{
+ return &link->link_status;
+}
+
+void core_link_resume(struct dc_link *link)
+{
+ if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
+ program_hpd_filter(link);
+}
+
+static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
+{
+ struct dc_link_settings *link_settings =
+ &stream->sink->link->cur_link_settings;
+ uint32_t link_rate_in_mbps =
+ link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
+ struct fixed31_32 mbps = dal_fixed31_32_from_int(
+ link_rate_in_mbps * link_settings->lane_count);
+
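+ /* e.g. HBR2 (link_rate value 20 -> 540 MBps per lane) at 4 lanes:
+ * 2160 / 54 = 40 PBN per time slot */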
+ return dal_fixed31_32_div_int(mbps, 54);
+}
+
+static int get_color_depth(enum dc_color_depth color_depth)
+{
+ switch (color_depth) {
+ case COLOR_DEPTH_666: return 6;
+ case COLOR_DEPTH_888: return 8;
+ case COLOR_DEPTH_101010: return 10;
+ case COLOR_DEPTH_121212: return 12;
+ case COLOR_DEPTH_141414: return 14;
+ case COLOR_DEPTH_161616: return 16;
+ default: return 0;
+ }
+}
+
+static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
+{
+ uint32_t bpc;
+ uint64_t kbps;
+ struct fixed31_32 peak_kbps;
+ uint32_t numerator;
+ uint32_t denominator;
+
+ bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
+ kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk * bpc * 3;
+
+ /*
+ * Margin 5300ppm + 300ppm ~ 0.6% as per spec, so the factor is 1.006.
+ * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
+ * common multiplier to render an integer PBN for all link rate/lane
+ * count combinations.
+ * Calculation:
+ * peak_kbps *= (1006/1000)
+ * peak_kbps *= (64/54)
+ * peak_kbps /= (8 * 1000) convert kbits/sec to Mbytes/sec
+ */
+
+ numerator = 64 * PEAK_FACTOR_X1000;
+ denominator = 54 * 8 * 1000 * 1000;
+ kbps *= numerator;
+ peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
+
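+ /* e.g. a hypothetical 148,500 kHz pixel clock at 8 bpc:
+ * 148500 * 8 * 3 = 3,564,000; * (64 * 1006) / (54 * 8 * 1,000,000)
+ * is roughly 531 PBN */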
+ return peak_kbps;
+}
+
+static void update_mst_stream_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *stream_enc,
+ const struct dp_mst_stream_allocation_table *proposed_table)
+{
+ struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
+ { 0 } };
+ struct link_mst_stream_allocation *dc_alloc;
+
+ int i;
+ int j;
+
+	/* the DRM proposed_table should add at most one new payload per call */
+ ASSERT(proposed_table->stream_count -
+ link->mst_stream_alloc_table.stream_count < 2);
+
+ /* copy proposed_table to link, add stream encoder */
+ for (i = 0; i < proposed_table->stream_count; i++) {
+
+ for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
+ dc_alloc =
+ &link->mst_stream_alloc_table.stream_allocations[j];
+
+ if (dc_alloc->vcp_id ==
+ proposed_table->stream_allocations[i].vcp_id) {
+
+ work_table[i] = *dc_alloc;
+ break; /* exit j loop */
+ }
+ }
+
+ /* new vcp_id */
+ if (j == link->mst_stream_alloc_table.stream_count) {
+ work_table[i].vcp_id =
+ proposed_table->stream_allocations[i].vcp_id;
+ work_table[i].slot_count =
+ proposed_table->stream_allocations[i].slot_count;
+ work_table[i].stream_enc = stream_enc;
+ }
+ }
+
+ /* update link->mst_stream_alloc_table with work_table */
+ link->mst_stream_alloc_table.stream_count =
+ proposed_table->stream_count;
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++)
+ link->mst_stream_alloc_table.stream_allocations[i] =
+ work_table[i];
+}
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ uint8_t i;
+
+	/* enable_link_dp_mst already checks link->enabled_stream_count
+	 * and that the stream is in link->stream[]. This is called during
+	 * set mode, so stream_enc is available.
+ */
+
+	/* calculate the VC payload for this stream: stream_alloc */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ update_mst_stream_alloc_table(
+ link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+	} else
+		dm_logger_write(link->ctx->logger, LOG_WARNING,
+				"Failed to update "
+				"MST allocation table for "
+				"pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+
+ dm_logger_write(link->ctx->logger, LOG_MST,
+			"%s "
+			"stream_count: %d:\n",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "stream_enc[%d]: 0x%x "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* program DP source TX for payload */
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ /* send down message */
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+
+ /* slot X.Y for only current stream */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ pbn = get_pbn_from_timing(pipe_ctx);
+ avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
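+	/* e.g. ~1062 PBN / 40 PBN per slot ~= 26.6 time slots per MTP for the
+	 * worked examples above
+	 */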
+
+ stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+
+}
+
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
+ uint8_t i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
+
+	/* deallocate_mst_payload is called before the link is disabled. On a
+	 * mode change or a monitor disable/enable, a new stream is created
+	 * that is not yet in link->stream[]; its payload has not been
+	 * allocated, so no de-allocation should be done for it. For a new
+	 * mode set, map_resources gets an engine for the new stream, so
+	 * stream_enc->id should already be valid by this point.
+ */
+
+ /* slot X.Y */
+ stream_encoder->funcs->set_mst_bandwidth(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ /* TODO: which component is responsible for remove payload table? */
+ if (mst_mode) {
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ false)) {
+
+ update_mst_stream_alloc_table(
+ link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+		} else {
+			dm_logger_write(link->ctx->logger, LOG_WARNING,
+					"Failed to update "
+					"MST allocation table for "
+					"pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ }
+ }
+
+ dm_logger_write(link->ctx->logger, LOG_MST,
+			"%s "
+			"stream_count: %d:\n",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ dm_logger_write(link->ctx->logger, LOG_MST,
+ "stream_enc[%d]: 0x%x "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ if (mst_mode) {
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+ }
+
+ return DC_OK;
+}
+
+void core_link_enable_stream(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
+ enum dc_status status = enable_link(state, pipe_ctx);
+
+ if (status != DC_OK) {
+ dm_logger_write(pipe_ctx->stream->ctx->logger,
+ LOG_WARNING, "enabling link %u failed: %d\n",
+ pipe_ctx->stream->sink->link->link_index,
+ status);
+
+ /* Abort stream enable *unless* the failure was due to
+ * DP link training - some DP monitors will recover and
+ * show the stream anyway. But MST displays can't proceed
+ * without link training.
+ */
+ if (status != DC_FAIL_DP_LINK_TRAINING ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ }
+
+	/* turn off the OTG test pattern if it is enabled */
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
+
+ core_dc->hwss.enable_stream(pipe_ctx);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ allocate_mst_payload(pipe_ctx);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ core_dc->hwss.unblank_stream(pipe_ctx,
+ &pipe_ctx->stream->sink->link->cur_link_settings);
+}
+
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+
+ core_dc->hwss.disable_stream(pipe_ctx, option);
+
+ disable_link(pipe_ctx->stream->sink->link, pipe_ctx->stream->signal);
+}
+
+void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
+ return;
+
+ core_dc->hwss.set_avmute(pipe_ctx, enable);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
new file mode 100644
index 000000000000..d5294798b0a5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -0,0 +1,775 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "gpio_service_interface.h"
+#include "include/ddc_service_types.h"
+#include "include/grph_object_id.h"
+#include "include/dpcd_defs.h"
+#include "include/logger_interface.h"
+#include "include/vector.h"
+#include "core_types.h"
+#include "dc_link_ddc.h"
+
+#define AUX_POWER_UP_WA_DELAY 500
+#define I2C_OVER_AUX_DEFER_WA_DELAY 70
+
+/* CV smart dongle slave address for retrieving supported HDTV modes*/
+#define CV_SMART_DONGLE_ADDRESS 0x20
+/* DVI-HDMI dongle slave address for retrieving dongle signature*/
+#define DVI_HDMI_DONGLE_ADDRESS 0x68
+static const int8_t dvi_hdmi_dongle_signature_str[] = "6140063500G";
+struct dvi_hdmi_dongle_signature_data {
+ int8_t vendor[3];/* "AMD" */
+ uint8_t version[2];
+ uint8_t size;
+ int8_t id[11];/* "6140063500G"*/
+};
+/* DP-HDMI dongle slave address for retrieving dongle signature*/
+#define DP_HDMI_DONGLE_ADDRESS 0x40
+static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
+#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
+
+struct dp_hdmi_dongle_signature_data {
+ int8_t id[15];/* "DP-HDMI ADAPTOR"*/
+	uint8_t eot;/* end of transmission '\x4' */
+};
+
+/* SCDC Address defines (HDMI 2.0)*/
+#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
+#define HDMI_SCDC_ADDRESS 0x54
+#define HDMI_SCDC_SINK_VERSION 0x01
+#define HDMI_SCDC_SOURCE_VERSION 0x02
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS 0x40
+#define HDMI_SCDC_ERR_DETECT 0x50
+#define HDMI_SCDC_TEST_CONFIG 0xC0
+
+union hdmi_scdc_update_read_data {
+ uint8_t byte[2];
+ struct {
+ uint8_t STATUS_UPDATE:1;
+ uint8_t CED_UPDATE:1;
+ uint8_t RR_TEST:1;
+ uint8_t RESERVED:5;
+ uint8_t RESERVED2:8;
+ } fields;
+};
+
+union hdmi_scdc_status_flags_data {
+ uint8_t byte[2];
+ struct {
+ uint8_t CLOCK_DETECTED:1;
+ uint8_t CH0_LOCKED:1;
+ uint8_t CH1_LOCKED:1;
+ uint8_t CH2_LOCKED:1;
+ uint8_t RESERVED:4;
+ uint8_t RESERVED2:8;
+ } fields;
+};
+
+union hdmi_scdc_ced_data {
+ uint8_t byte[7];
+ struct {
+ uint8_t CH0_8LOW:8;
+ uint8_t CH0_7HIGH:7;
+ uint8_t CH0_VALID:1;
+ uint8_t CH1_8LOW:8;
+ uint8_t CH1_7HIGH:7;
+ uint8_t CH1_VALID:1;
+ uint8_t CH2_8LOW:8;
+ uint8_t CH2_7HIGH:7;
+ uint8_t CH2_VALID:1;
+ uint8_t CHECKSUM:8;
+ } fields;
+};
+
+union hdmi_scdc_test_config_Data {
+ uint8_t byte;
+ struct {
+ uint8_t TEST_READ_REQUEST_DELAY:7;
+ uint8_t TEST_READ_REQUEST: 1;
+ } fields;
+};
+
+struct i2c_payloads {
+ struct vector payloads;
+};
+
+struct aux_payloads {
+ struct vector payloads;
+};
+
+static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+ struct i2c_payloads *payloads;
+
+ payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
+
+ if (!payloads)
+ return NULL;
+
+ if (dal_vector_construct(
+ &payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
+ return payloads;
+
+ kfree(payloads);
+ return NULL;
+
+}
+
+static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+{
+ return (struct i2c_payload *)p->payloads.container;
+}
+
+static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+{
+ return p->payloads.count;
+}
+
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+{
+ if (!p || !*p)
+ return;
+ dal_vector_destruct(&(*p)->payloads);
+ kfree(*p);
+ *p = NULL;
+
+}
+
+static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+ struct aux_payloads *payloads;
+
+ payloads = kzalloc(sizeof(struct aux_payloads), GFP_KERNEL);
+
+ if (!payloads)
+ return NULL;
+
+ if (dal_vector_construct(
+ &payloads->payloads, ctx, count, sizeof(struct aux_payload)))
+ return payloads;
+
+ kfree(payloads);
+ return NULL;
+}
+
+static struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p)
+{
+ return (struct aux_payload *)p->payloads.container;
+}
+
+static uint32_t dal_ddc_aux_payloads_get_count(struct aux_payloads *p)
+{
+ return p->payloads.count;
+}
+
+static void dal_ddc_aux_payloads_destroy(struct aux_payloads **p)
+{
+ if (!p || !*p)
+ return;
+
+ dal_vector_destruct(&(*p)->payloads);
+ kfree(*p);
+ *p = NULL;
+}
+
+#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+void dal_ddc_i2c_payloads_add(
+ struct i2c_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write)
+{
+ uint32_t payload_size = EDID_SEGMENT_SIZE;
+ uint32_t pos;
+
+ for (pos = 0; pos < len; pos += payload_size) {
+ struct i2c_payload payload = {
+ .write = write,
+ .address = address,
+ .length = DDC_MIN(payload_size, len - pos),
+ .data = data + pos };
+ dal_vector_append(&payloads->payloads, &payload);
+ }
+
+}
+
+void dal_ddc_aux_payloads_add(
+ struct aux_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write)
+{
+ uint32_t payload_size = DEFAULT_AUX_MAX_DATA_SIZE;
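+	/* Native AUX transactions carry at most 16 data bytes per the DP spec,
+	 * so e.g. a 128-byte EDID block read is split into 8 payloads here
+	 * (assuming DEFAULT_AUX_MAX_DATA_SIZE is that 16-byte limit).
+	 */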
+ uint32_t pos;
+
+ for (pos = 0; pos < len; pos += payload_size) {
+ struct aux_payload payload = {
+ .i2c_over_aux = true,
+ .write = write,
+ .address = address,
+ .length = DDC_MIN(payload_size, len - pos),
+ .data = data + pos };
+ dal_vector_append(&payloads->payloads, &payload);
+ }
+}
+
+static void construct(
+ struct ddc_service *ddc_service,
+ struct ddc_service_init_data *init_data)
+{
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(init_data->id);
+
+ struct gpio_service *gpio_service = init_data->ctx->gpio_service;
+ struct graphics_object_i2c_info i2c_info;
+ struct gpio_ddc_hw_info hw_info;
+ struct dc_bios *dcb = init_data->ctx->dc_bios;
+
+ ddc_service->link = init_data->link;
+ ddc_service->ctx = init_data->ctx;
+
+ if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
+ ddc_service->ddc_pin = NULL;
+ } else {
+ hw_info.ddc_channel = i2c_info.i2c_line;
+ hw_info.hw_supported = i2c_info.i2c_hw_assist;
+
+ ddc_service->ddc_pin = dal_gpio_create_ddc(
+ gpio_service,
+ i2c_info.gpio_info.clk_a_register_index,
+ 1 << i2c_info.gpio_info.clk_a_shift,
+ &hw_info);
+ }
+
+ ddc_service->flags.EDID_QUERY_DONE_ONCE = false;
+ ddc_service->flags.FORCE_READ_REPEATED_START = false;
+ ddc_service->flags.EDID_STRESS_READ = false;
+
+ ddc_service->flags.IS_INTERNAL_DISPLAY =
+ connector_id == CONNECTOR_ID_EDP ||
+ connector_id == CONNECTOR_ID_LVDS;
+
+ ddc_service->wa.raw = 0;
+}
+
+struct ddc_service *dal_ddc_service_create(
+ struct ddc_service_init_data *init_data)
+{
+ struct ddc_service *ddc_service;
+
+ ddc_service = kzalloc(sizeof(struct ddc_service), GFP_KERNEL);
+
+ if (!ddc_service)
+ return NULL;
+
+ construct(ddc_service, init_data);
+ return ddc_service;
+}
+
+static void destruct(struct ddc_service *ddc)
+{
+ if (ddc->ddc_pin)
+ dal_gpio_destroy_ddc(&ddc->ddc_pin);
+}
+
+void dal_ddc_service_destroy(struct ddc_service **ddc)
+{
+ if (!ddc || !*ddc) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ destruct(*ddc);
+ kfree(*ddc);
+ *ddc = NULL;
+}
+
+enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc)
+{
+ return DDC_SERVICE_TYPE_CONNECTOR;
+}
+
+void dal_ddc_service_set_transaction_type(
+ struct ddc_service *ddc,
+ enum ddc_transaction_type type)
+{
+ ddc->transaction_type = type;
+}
+
+bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
+{
+ switch (ddc->transaction_type) {
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+void ddc_service_set_dongle_type(struct ddc_service *ddc,
+ enum display_dongle_type dongle_type)
+{
+ ddc->dongle_type = dongle_type;
+}
+
+static uint32_t defer_delay_converter_wa(
+ struct ddc_service *ddc,
+ uint32_t defer_delay)
+{
+ struct dc_link *link = ddc->link;
+
+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_4 &&
+ !memcmp(link->dpcd_caps.branch_dev_name,
+ DP_DVI_CONVERTER_ID_4,
+ sizeof(link->dpcd_caps.branch_dev_name)))
+ return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY ?
+ defer_delay : I2C_OVER_AUX_DEFER_WA_DELAY;
+
+ return defer_delay;
+}
+
+#define DP_TRANSLATOR_DELAY 5
+
+uint32_t get_defer_delay(struct ddc_service *ddc)
+{
+ uint32_t defer_delay = 0;
+
+ switch (ddc->transaction_type) {
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+ if ((DISPLAY_DONGLE_DP_VGA_CONVERTER == ddc->dongle_type) ||
+ (DISPLAY_DONGLE_DP_DVI_CONVERTER == ddc->dongle_type) ||
+ (DISPLAY_DONGLE_DP_HDMI_CONVERTER ==
+ ddc->dongle_type)) {
+
+ defer_delay = DP_TRANSLATOR_DELAY;
+
+ defer_delay =
+ defer_delay_converter_wa(ddc, defer_delay);
+
+ } else /*sink has a delay different from an Active Converter*/
+ defer_delay = 0;
+ break;
+ case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+ defer_delay = DP_TRANSLATOR_DELAY;
+ break;
+ default:
+ break;
+ }
+ return defer_delay;
+}
+
+static bool i2c_read(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *buffer,
+ uint32_t len)
+{
+ uint8_t offs_data = 0;
+ struct i2c_payload payloads[2] = {
+ {
+ .write = true,
+ .address = address,
+ .length = 1,
+ .data = &offs_data },
+ {
+ .write = false,
+ .address = address,
+ .length = len,
+ .data = buffer } };
+
+ struct i2c_command command = {
+ .payloads = payloads,
+ .number_of_payloads = 2,
+ .engine = DDC_I2C_COMMAND_ENGINE,
+ .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+ return dm_helpers_submit_i2c(
+ ddc->ctx,
+ ddc->link,
+ &command);
+}
+
+void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap)
+{
+ uint8_t i;
+ bool is_valid_hdmi_signature;
+ enum display_dongle_type *dongle = &sink_cap->dongle_type;
+ uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
+ bool is_type2_dongle = false;
+ struct dp_hdmi_dongle_signature_data *dongle_signature;
+
+ /* Assume we have no valid DP passive dongle connected */
+ *dongle = DISPLAY_DONGLE_NONE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+
+ /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
+ if (!i2c_read(
+ ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf))) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ return;
+ }
+
+ /* Check if Type 2 dongle.*/
+ if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
+ is_type2_dongle = true;
+
+ dongle_signature =
+ (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
+
+ is_valid_hdmi_signature = true;
+
+ /* Check EOT */
+ if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
+ is_valid_hdmi_signature = false;
+ }
+
+ /* Check signature */
+ for (i = 0; i < sizeof(dongle_signature->id); ++i) {
+		/* If it is not the right signature, fail, but tolerate a
+		 * mismatch in the subversion byte (index 3). */
+ if (dongle_signature->id[i] !=
+ dp_hdmi_dongle_signature_str[i] && i != 3) {
+
+ if (is_type2_dongle) {
+ is_valid_hdmi_signature = false;
+ break;
+ }
+
+ }
+ }
+
+ if (is_type2_dongle) {
+ uint32_t max_tmds_clk =
+ type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
+
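+		/* the type 2 adaptor reports MAX_TMDS_CLOCK in units of
+		 * 2.5 MHz, so scale the raw value by 2.5 (x2 + x0.5) to get MHz
+		 */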
+ max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
+
+ if (0 == max_tmds_clk ||
+ max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
+ max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle %dMhz: ",
+ max_tmds_clk);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
+ max_tmds_clk);
+
+ }
+
+ /* Multiply by 1000 to convert to kHz. */
+ sink_cap->max_hdmi_pixel_clock =
+ max_tmds_clk * 1000;
+ }
+
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ }
+ }
+
+ return;
+}
+
+enum {
+ DP_SINK_CAP_SIZE =
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1
+};
+
+bool dal_ddc_service_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size)
+{
+ bool ret;
+ uint32_t payload_size =
+ dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
+ DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
+
+ uint32_t write_payloads =
+ (write_size + payload_size - 1) / payload_size;
+
+ uint32_t read_payloads =
+ (read_size + payload_size - 1) / payload_size;
+
+ uint32_t payloads_num = write_payloads + read_payloads;
+
+ if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
+ return false;
+
+	/* TODO: the payload data length for i2c and aux is a uint8_t,
+	 * but we want to be able to read 256 bytes over i2c. */
+ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+
+ struct aux_payloads *payloads =
+ dal_ddc_aux_payloads_create(ddc->ctx, payloads_num);
+
+ struct aux_command command = {
+ .payloads = dal_ddc_aux_payloads_get(payloads),
+ .number_of_payloads = 0,
+ .defer_delay = get_defer_delay(ddc),
+ .max_defer_write_retry = 0 };
+
+ dal_ddc_aux_payloads_add(
+ payloads, address, write_size, write_buf, true);
+
+ dal_ddc_aux_payloads_add(
+ payloads, address, read_size, read_buf, false);
+
+ command.number_of_payloads =
+ dal_ddc_aux_payloads_get_count(payloads);
+
+ ret = dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command);
+
+ dal_ddc_aux_payloads_destroy(&payloads);
+
+ } else {
+ struct i2c_payloads *payloads =
+ dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+
+ struct i2c_command command = {
+ .payloads = dal_ddc_i2c_payloads_get(payloads),
+ .number_of_payloads = 0,
+ .engine = DDC_I2C_COMMAND_ENGINE,
+ .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+ dal_ddc_i2c_payloads_add(
+ payloads, address, write_size, write_buf, true);
+
+ dal_ddc_i2c_payloads_add(
+ payloads, address, read_size, read_buf, false);
+
+ command.number_of_payloads =
+ dal_ddc_i2c_payloads_get_count(payloads);
+
+ ret = dm_helpers_submit_i2c(
+ ddc->ctx,
+ ddc->link,
+ &command);
+
+ dal_ddc_i2c_payloads_destroy(&payloads);
+ }
+
+ return ret;
+}
+
+enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t len)
+{
+ struct aux_payload read_payload = {
+ .i2c_over_aux = i2c,
+ .write = false,
+ .address = address,
+ .length = len,
+ .data = data,
+ };
+ struct aux_command command = {
+ .payloads = &read_payload,
+ .number_of_payloads = 1,
+ .defer_delay = 0,
+ .max_defer_write_retry = 0,
+ .mot = mot
+ };
+
+ if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+ BREAK_TO_DEBUGGER();
+ return DDC_RESULT_FAILED_INVALID_OPERATION;
+ }
+
+ if (dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command))
+ return DDC_RESULT_SUCESSFULL;
+
+ return DDC_RESULT_FAILED_OPERATION;
+}
+
+enum ddc_result dal_ddc_service_write_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t len)
+{
+ struct aux_payload write_payload = {
+ .i2c_over_aux = i2c,
+ .write = true,
+ .address = address,
+ .length = len,
+ .data = (uint8_t *)data,
+ };
+ struct aux_command command = {
+ .payloads = &write_payload,
+ .number_of_payloads = 1,
+ .defer_delay = 0,
+ .max_defer_write_retry = 0,
+ .mot = mot
+ };
+
+ if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+ BREAK_TO_DEBUGGER();
+ return DDC_RESULT_FAILED_INVALID_OPERATION;
+ }
+
+ if (dal_i2caux_submit_aux_command(
+ ddc->ctx->i2caux,
+ ddc->ddc_pin,
+ &command))
+ return DDC_RESULT_SUCESSFULL;
+
+ return DDC_RESULT_FAILED_OPERATION;
+}
+
+/*test only function*/
+void dal_ddc_service_set_ddc_pin(
+ struct ddc_service *ddc_service,
+ struct ddc *ddc)
+{
+ ddc_service->ddc_pin = ddc;
+}
+
+struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service)
+{
+ return ddc_service->ddc_pin;
+}
+
+void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
+ uint32_t pix_clk,
+ bool lte_340_scramble)
+{
+	bool over_340_mhz = pix_clk > 340000;
+ uint8_t slave_address = HDMI_SCDC_ADDRESS;
+ uint8_t offset = HDMI_SCDC_SINK_VERSION;
+ uint8_t sink_version = 0;
+ uint8_t write_buffer[2] = {0};
+	/* lte_340_scramble: 'scramble below 340 MHz' bit from the sink's SCDC caps */
+
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), &sink_version, sizeof(sink_version));
+ if (sink_version == 1) {
+ /*Source Version = 1*/
+ write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;
+ write_buffer[1] = 1;
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ write_buffer, sizeof(write_buffer), NULL, 0);
+ /*Read Request from SCDC caps*/
+ }
+ write_buffer[0] = HDMI_SCDC_TMDS_CONFIG;
+
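+	/* Per the HDMI 2.0 SCDC definition of TMDS_Config: bit 0 is
+	 * Scrambling_Enable and bit 1 is TMDS_Bit_Clock_Ratio (1 = 1/40).
+	 * 3 = scrambling at the 1/40 ratio required above 340 MHz,
+	 * 1 = scrambling at the 1/10 ratio for sinks requesting it below
+	 * 340 MHz, 0 = no scrambling.
+	 */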
+ if (over_340_mhz) {
+ write_buffer[1] = 3;
+ } else if (lte_340_scramble) {
+ write_buffer[1] = 1;
+ } else {
+ write_buffer[1] = 0;
+ }
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer,
+ sizeof(write_buffer), NULL, 0);
+}
+
+void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
+{
+ uint8_t slave_address = HDMI_SCDC_ADDRESS;
+ uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
+ uint8_t tmds_config = 0;
+
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), &tmds_config, sizeof(tmds_config));
+ if (tmds_config & 0x1) {
+ union hdmi_scdc_status_flags_data status_data = { {0} };
+ uint8_t scramble_status = 0;
+
+ offset = HDMI_SCDC_SCRAMBLER_STATUS;
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ &offset, sizeof(offset), &scramble_status,
+ sizeof(scramble_status));
+ offset = HDMI_SCDC_STATUS_FLAGS;
+ dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ &offset, sizeof(offset), status_data.byte,
+ sizeof(status_data.byte));
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
new file mode 100644
index 000000000000..e6bf05d76a94
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -0,0 +1,2601 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+#include "dm_services.h"
+#include "dc.h"
+#include "dc_link_dp.h"
+#include "dm_helpers.h"
+
+#include "inc/core_types.h"
+#include "link_hwss.h"
+#include "dc_link_ddc.h"
+#include "core_status.h"
+#include "dpcd_defs.h"
+
+#include "resource.h"
+
+/* maximum pre emphasis level allowed for each voltage swing level*/
+static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
+ PRE_EMPHASIS_LEVEL3,
+ PRE_EMPHASIS_LEVEL2,
+ PRE_EMPHASIS_LEVEL1,
+ PRE_EMPHASIS_DISABLED };
+
+enum {
+ POST_LT_ADJ_REQ_LIMIT = 6,
+ POST_LT_ADJ_REQ_TIMEOUT = 200
+};
+
+enum {
+ LINK_TRAINING_MAX_RETRY_COUNT = 5,
+	/* to avoid an infinite loop in which the receiver keeps
+	 * switching between different VS levels
+ */
+ LINK_TRAINING_MAX_CR_RETRY = 100
+};
+
+static bool decide_fallback_link_setting(
+ struct dc_link_settings initial_link_settings,
+ struct dc_link_settings *current_link_setting,
+ enum link_training_result training_result);
+static struct dc_link_settings get_common_supported_link_settings(
+ struct dc_link_settings link_setting_a,
+ struct dc_link_settings link_setting_b);
+
+static void wait_for_training_aux_rd_interval(
+ struct dc_link *link,
+ uint32_t default_wait_in_micro_secs)
+{
+ union training_aux_rd_interval training_rd_interval;
+
+ /* overwrite the delay if rev > 1.1*/
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ /* DP 1.2 or later - retrieve delay through
+ * "DPCD_ADDR_TRAINING_AUX_RD_INTERVAL" register */
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ default_wait_in_micro_secs =
+ training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
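+		/* non-zero TRAINING_AUX_RD_INTERVAL values are in units of
+		 * 4 ms, hence the * 4000 us above; e.g. a value of 2 requests
+		 * an 8 ms wait
+		 */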
+ }
+
+ udelay(default_wait_in_micro_secs);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n wait = %d\n",
+ __func__,
+ default_wait_in_micro_secs);
+}
+
+static void dpcd_set_training_pattern(
+ struct dc_link *link,
+ union dpcd_training_pattern dpcd_pattern)
+{
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ 1);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x pattern = %x\n",
+ __func__,
+ DP_TRAINING_PATTERN_SET,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
+static void dpcd_set_link_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings)
+{
+ uint8_t rate = (uint8_t)
+ (lt_settings->link_settings.link_rate);
+
+ union down_spread_ctrl downspread = {{0}};
+ union lane_count_set lane_count_set = {{0}};
+ uint8_t link_set_buffer[2];
+
+ downspread.raw = (uint8_t)
+ (lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = 1;
+
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+
+ link_set_buffer[0] = rate;
+ link_set_buffer[1] = lane_count_set.raw;
+
+ core_link_write_dpcd(link, DP_LINK_BW_SET,
+ link_set_buffer, 2);
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+
+}
+
+static enum dpcd_training_patterns
+ hw_training_pattern_to_dpcd_training_pattern(
+ struct dc_link *link,
+ enum hw_dp_training_pattern pattern)
+{
+ enum dpcd_training_patterns dpcd_tr_pattern =
+ DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+ switch (pattern) {
+ case HW_DP_TRAINING_PATTERN_1:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
+ break;
+ case HW_DP_TRAINING_PATTERN_2:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
+ break;
+ case HW_DP_TRAINING_PATTERN_3:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
+ break;
+ case HW_DP_TRAINING_PATTERN_4:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
+ break;
+ default:
+ ASSERT(0);
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+
+ return dpcd_tr_pattern;
+
+}
+
+static void dpcd_set_lt_pattern_and_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum hw_dp_training_pattern pattern)
+{
+ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+ const uint32_t dpcd_base_lt_offset =
+ DP_TRAINING_PATTERN_SET;
+ uint8_t dpcd_lt_buffer[5] = {0};
+ union dpcd_training_pattern dpcd_pattern = {{0}};
+ uint32_t lane;
+ uint32_t size_in_bytes;
+ bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+
+ /*****************************************************************
+ * DpcdAddress_TrainingPatternSet
+ *****************************************************************/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+ hw_training_pattern_to_dpcd_training_pattern(link, pattern);
+
+ dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
+ = dpcd_pattern.raw;
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s\n %x pattern = %x\n",
+ __func__,
+ DP_TRAINING_PATTERN_SET,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+
+ /*****************************************************************
+ * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
+ *****************************************************************/
+ for (lane = 0; lane <
+ (uint32_t)(lt_settings->link_settings.lane_count); lane++) {
+
+ dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
+
+ dpcd_lane[lane].bits.MAX_SWING_REACHED =
+ (lt_settings->lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (lt_settings->lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ }
+
+	/* concatenate everything into one buffer */
+
+ size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
+
+	/* lane settings go at offset DP_TRAINING_LANE0_SET (0x103) -
+	 * DP_TRAINING_PATTERN_SET (0x102) = 1 in the burst buffer */
+ memmove(
+ &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - dpcd_base_lt_offset],
+ dpcd_lane,
+ size_in_bytes);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s:\n %x VS set = %x PE set = %x "
+		"max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+ if (edp_workaround) {
+ /* for eDP write in 2 parts because the 5-byte burst is
+ * causing issues on some eDP panels (EPR#366724)
+ */
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw) );
+
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(dpcd_lane),
+ size_in_bytes);
+
+ } else
+ /* write it all in (1 + number-of-lanes)-byte burst*/
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ size_in_bytes + sizeof(dpcd_pattern.raw) );
+
+ link->cur_lane_setting = lt_settings->lane_settings[0];
+}
+
+static bool is_cr_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ bool done = true;
+ uint32_t lane;
+ /*LANEx_CR_DONE bits All 1's?*/
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+ if (!dpcd_lane_status[lane].bits.CR_DONE_0)
+ done = false;
+ }
+ return done;
+
+}
+
+static bool is_ch_eq_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status,
+ union lane_align_status_updated *lane_status_updated)
+{
+ bool done = true;
+ uint32_t lane;
+ if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
+ done = false;
+ else {
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+ if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
+ !dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
+ done = false;
+ }
+ }
+ return done;
+
+}
+
+static void update_drive_settings(
+ struct link_training_settings *dest,
+ struct link_training_settings src)
+{
+ uint32_t lane;
+ for (lane = 0; lane < src.link_settings.lane_count; lane++) {
+ dest->lane_settings[lane].VOLTAGE_SWING =
+ src.lane_settings[lane].VOLTAGE_SWING;
+ dest->lane_settings[lane].PRE_EMPHASIS =
+ src.lane_settings[lane].PRE_EMPHASIS;
+ dest->lane_settings[lane].POST_CURSOR2 =
+ src.lane_settings[lane].POST_CURSOR2;
+ }
+}
+
+static uint8_t get_nibble_at_index(const uint8_t *buf,
+ uint32_t index)
+{
+ uint8_t nibble;
+ nibble = buf[index / 2];
+
+ if (index % 2)
+ nibble >>= 4;
+ else
+ nibble &= 0x0F;
+
+ return nibble;
+}
+
+static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
+ enum dc_voltage_swing voltage)
+{
+ enum dc_pre_emphasis pre_emphasis;
+ pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
+
+ if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
+ pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
+
+ return pre_emphasis;
+
+}
+
+static void find_max_drive_settings(
+ const struct link_training_settings *link_training_setting,
+ struct link_training_settings *max_lt_setting)
+{
+ uint32_t lane;
+ struct dc_lane_settings max_requested;
+
+ max_requested.VOLTAGE_SWING =
+ link_training_setting->
+ lane_settings[0].VOLTAGE_SWING;
+ max_requested.PRE_EMPHASIS =
+ link_training_setting->
+ lane_settings[0].PRE_EMPHASIS;
+ /*max_requested.postCursor2 =
+ * link_training_setting->laneSettings[0].postCursor2;*/
+
+ /* Determine what the maximum of the requested settings are*/
+ for (lane = 1; lane < link_training_setting->link_settings.lane_count;
+ lane++) {
+ if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
+ max_requested.VOLTAGE_SWING)
+
+ max_requested.VOLTAGE_SWING =
+ link_training_setting->
+ lane_settings[lane].VOLTAGE_SWING;
+
+ if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
+ max_requested.PRE_EMPHASIS)
+ max_requested.PRE_EMPHASIS =
+ link_training_setting->
+ lane_settings[lane].PRE_EMPHASIS;
+
+ /*
+ if (link_training_setting->laneSettings[lane].postCursor2 >
+ max_requested.postCursor2)
+ {
+ max_requested.postCursor2 =
+ link_training_setting->laneSettings[lane].postCursor2;
+ }
+ */
+ }
+
+ /* make sure the requested settings are
+ * not higher than maximum settings*/
+ if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
+ max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
+
+ if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
+ max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
+ /*
+ if (max_requested.postCursor2 > PostCursor2_MaxLevel)
+ max_requested.postCursor2 = PostCursor2_MaxLevel;
+ */
+
+ /* make sure the pre-emphasis matches the voltage swing*/
+ if (max_requested.PRE_EMPHASIS >
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING))
+ max_requested.PRE_EMPHASIS =
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING);
+
+ /*
+ * Post Cursor2 levels are completely independent from
+ * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
+ * can only be applied to each allowable combination of voltage
+ * swing and pre-emphasis levels */
+ /* if ( max_requested.postCursor2 >
+ * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
+ * max_requested.postCursor2 =
+ * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
+ */
+
+ max_lt_setting->link_settings.link_rate =
+ link_training_setting->link_settings.link_rate;
+ max_lt_setting->link_settings.lane_count =
+ link_training_setting->link_settings.lane_count;
+ max_lt_setting->link_settings.link_spread =
+ link_training_setting->link_settings.link_spread;
+
+ for (lane = 0; lane <
+ link_training_setting->link_settings.lane_count;
+ lane++) {
+ max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
+ max_requested.VOLTAGE_SWING;
+ max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
+ max_requested.PRE_EMPHASIS;
+ /*max_lt_setting->laneSettings[lane].postCursor2 =
+ * max_requested.postCursor2;
+ */
+ }
+
+}
+
+static void get_lane_status_and_drive_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ union lane_status *ln_status,
+ union lane_align_status_updated *ln_status_updated,
+ struct link_training_settings *req_settings)
+{
+ uint8_t dpcd_buf[6] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
+ struct link_training_settings request_settings = {{0}};
+ uint32_t lane;
+
+ memset(req_settings, '\0', sizeof(struct link_training_settings));
+
+ core_link_read_dpcd(
+ link,
+ DP_LANE0_1_STATUS,
+ (uint8_t *)(dpcd_buf),
+ sizeof(dpcd_buf));
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->link_settings.lane_count);
+ lane++) {
+
+ ln_status[lane].raw =
+ get_nibble_at_index(&dpcd_buf[0], lane);
+ dpcd_lane_adjust[lane].raw =
+ get_nibble_at_index(&dpcd_buf[4], lane);
+ }
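+	/* DP_LANE0_1_STATUS / DP_LANE2_3_STATUS and the adjust-request
+	 * registers pack two lanes per byte (even lane in bits 3:0, odd lane
+	 * in bits 7:4), hence the nibble indexing above
+	 */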
+
+ ln_status_updated->raw = dpcd_buf[2];
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
+ __func__,
+ DP_LANE0_1_STATUS, dpcd_buf[0],
+ DP_LANE2_3_STATUS, dpcd_buf[1]);
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+ "%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
+ __func__,
+ DP_ADJUST_REQUEST_LANE0_1,
+ dpcd_buf[4],
+ DP_ADJUST_REQUEST_LANE2_3,
+ dpcd_buf[5]);
+
+ /*copy to req_settings*/
+ request_settings.link_settings.lane_count =
+ link_training_setting->link_settings.lane_count;
+ request_settings.link_settings.link_rate =
+ link_training_setting->link_settings.link_rate;
+ request_settings.link_settings.link_spread =
+ link_training_setting->link_settings.link_spread;
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->link_settings.lane_count);
+ lane++) {
+
+ request_settings.lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
+ VOLTAGE_SWING_LANE);
+ request_settings.lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
+ PRE_EMPHASIS_LANE);
+ }
+
+	/* Note: for postcursor2, read the adjusted postcursor2 settings
+	 * from DpcdAddress_AdjustRequestPostCursor2 = 0x020C
+	 * (not implemented yet).
+	 */
+
+	/* find the maximum of the requested settings across all lanes
+	 * and set this maximum for all lanes
+	 */
+ find_max_drive_settings(&request_settings, req_settings);
+
+ /* if post cursor 2 is needed in the future,
+ * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
+ */
+
+}
+
+static void dpcd_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+ uint32_t lane;
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->
+ link_settings.lane_count);
+ lane++) {
+ dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(link_training_setting->
+ lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(link_training_setting->
+ lane_settings[lane].PRE_EMPHASIS);
+ dpcd_lane[lane].bits.MAX_SWING_REACHED =
+ (link_training_setting->
+ lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (link_training_setting->
+ lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ }
+
+ core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(dpcd_lane),
+ link_training_setting->link_settings.lane_count);
+
+ /*
+ if (LTSettings.link.rate == LinkRate_High2)
+ {
+ DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
+ for ( uint32_t lane = 0;
+ lane < lane_count_DPMax; lane++)
+ {
+ dpcd_lane2[lane].bits.post_cursor2_set =
+ static_cast<unsigned char>(
+ LTSettings.laneSettings[lane].postCursor2);
+ dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
+ }
+ m_pDpcdAccessSrv->WriteDpcdData(
+ DpcdAddress_Lane0Set2,
+ reinterpret_cast<unsigned char*>(dpcd_lane2),
+ LTSettings.link.lanes);
+ }
+ */
+
+ dm_logger_write(link->ctx->logger, LOG_HW_LINK_TRAINING,
+		"%s\n %x VS set = %x PE set = %x "
+		"max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+ dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+ dpcd_lane[0].bits.MAX_SWING_REACHED,
+ dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+ link->cur_lane_setting = link_training_setting->lane_settings[0];
+
+}
+
+static bool is_max_vs_reached(
+ const struct link_training_settings *lt_settings)
+{
+ uint32_t lane;
+ for (lane = 0; lane <
+ (uint32_t)(lt_settings->link_settings.lane_count);
+ lane++) {
+ if (lt_settings->lane_settings[lane].VOLTAGE_SWING
+ == VOLTAGE_SWING_MAX_LEVEL)
+ return true;
+ }
+ return false;
+
+}
+
+void dc_link_dp_set_drive_settings(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ /* program ASIC PHY settings*/
+ dp_set_hw_lane_settings(link, lt_settings);
+
+ /* Notify DP sink the PHY settings from source */
+ dpcd_set_lane_settings(link, lt_settings);
+}
+
+static bool perform_post_lt_adj_req_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum dc_lane_count lane_count =
+ lt_settings->link_settings.lane_count;
+
+ uint32_t adj_req_count;
+ uint32_t adj_req_timer;
+ bool req_drv_setting_changed;
+ uint32_t lane;
+
+ req_drv_setting_changed = false;
+ for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
+ adj_req_count++) {
+
+ req_drv_setting_changed = false;
+
+ for (adj_req_timer = 0;
+ adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
+ adj_req_timer++) {
+
+ struct link_training_settings req_settings;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated
+ dpcd_lane_status_updated;
+
+ get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings);
+
+ if (dpcd_lane_status_updated.bits.
+ POST_LT_ADJ_REQ_IN_PROGRESS == 0)
+ return true;
+
+ if (!is_cr_done(lane_count, dpcd_lane_status))
+ return false;
+
+ if (!is_ch_eq_done(
+ lane_count,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated))
+ return false;
+
+ for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
+
+ if (lt_settings->
+ lane_settings[lane].VOLTAGE_SWING !=
+ req_settings.lane_settings[lane].
+ VOLTAGE_SWING ||
+ lt_settings->lane_settings[lane].PRE_EMPHASIS !=
+ req_settings.lane_settings[lane].PRE_EMPHASIS) {
+
+ req_drv_setting_changed = true;
+ break;
+ }
+ }
+
+ if (req_drv_setting_changed) {
+ update_drive_settings(
+					lt_settings, req_settings);
+
+ dc_link_dp_set_drive_settings(link,
+ lt_settings);
+ break;
+ }
+
+ msleep(1);
+ }
+
+ if (!req_drv_setting_changed) {
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "%s: Post Link Training Adjust Request Timed out\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+ }
+ }
+ dm_logger_write(link->ctx->logger, LOG_WARNING,
+ "%s: Post Link Training Adjust Request limit reached\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+
+}
+
+static enum hw_dp_training_pattern get_supported_tp(struct dc_link *link)
+{
+ enum hw_dp_training_pattern highest_tp = HW_DP_TRAINING_PATTERN_2;
+ struct encoder_feature_support *features = &link->link_enc->features;
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+
+ if (features->flags.bits.IS_TPS3_CAPABLE)
+ highest_tp = HW_DP_TRAINING_PATTERN_3;
+
+ if (features->flags.bits.IS_TPS4_CAPABLE)
+ highest_tp = HW_DP_TRAINING_PATTERN_4;
+
+ if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED &&
+ highest_tp >= HW_DP_TRAINING_PATTERN_4)
+ return HW_DP_TRAINING_PATTERN_4;
+
+ if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED &&
+ highest_tp >= HW_DP_TRAINING_PATTERN_3)
+ return HW_DP_TRAINING_PATTERN_3;
+
+ return HW_DP_TRAINING_PATTERN_2;
+}
+
+static enum link_training_result perform_channel_equalization_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ struct link_training_settings req_settings;
+ enum hw_dp_training_pattern hw_tr_pattern;
+ uint32_t retries_ch_eq;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {{0}};
+	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+
+ hw_tr_pattern = get_supported_tp(link);
+
+ dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, lt_settings);
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq)
+			/* EPR #361076 - write as a 5-byte burst,
+			 * but only on the first iteration */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ hw_tr_pattern);
+ else
+ dpcd_set_lane_settings(link, lt_settings);
+
+ /* 3. wait for receiver to lock-on*/
+ wait_for_training_aux_rd_interval(link, 400);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink*/
+
+ get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings);
+
+ /* 5. check CR done*/
+ if (!is_cr_done(lane_count, dpcd_lane_status))
+ return LINK_TRAINING_EQ_FAIL_CR;
+
+ /* 6. check CHEQ done*/
+ if (is_ch_eq_done(lane_count,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated))
+ return LINK_TRAINING_SUCCESS;
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ update_drive_settings(lt_settings, req_settings);
+ }
+
+ return LINK_TRAINING_EQ_FAIL_EQ;
+
+}
+
+static bool perform_clock_recovery_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t lane;
+ struct link_training_settings req_settings;
+ enum dc_lane_count lane_count =
+ lt_settings->link_settings.lane_count;
+ enum hw_dp_training_pattern hw_tr_pattern = HW_DP_TRAINING_PATTERN_1;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+
+ retries_cr = 0;
+ retry_count = 0;
+ /* initial drive setting (VS/PE/PC2)*/
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->lane_settings[lane].VOLTAGE_SWING =
+ VOLTAGE_SWING_LEVEL0;
+ lt_settings->lane_settings[lane].PRE_EMPHASIS =
+ PRE_EMPHASIS_DISABLED;
+ lt_settings->lane_settings[lane].POST_CURSOR2 =
+ POST_CURSOR2_DISABLED;
+ }
+
+ dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+	/* The Synaptics MST hub can put link training into an infinite
+	 * loop by continuously switching the VS between level 0 and
+	 * level 1. To avoid that, only try for CR lock for at most
+	 * LINK_TRAINING_MAX_CR_RETRY iterations.
+	 */
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ /* 1. call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(
+ link,
+ lt_settings);
+
+ /* 2. update DPCD of the receiver*/
+ if (!retries_cr)
+			/* EPR #361076 - write as a 5-byte burst,
+			 * but only on the first iteration. */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ hw_tr_pattern);
+ else
+ dpcd_set_lane_settings(
+ link,
+ lt_settings);
+
+ /* 3. wait receiver to lock-on*/
+ wait_for_training_aux_rd_interval(
+ link,
+ 100);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ get_lane_status_and_drive_settings(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ &req_settings);
+
+ /* 5. check CR done*/
+ if (is_cr_done(lane_count, dpcd_lane_status))
+ return true;
+
+ /* 6. max VS reached*/
+ if (is_max_vs_reached(lt_settings))
+ return false;
+
+ /* 7. same voltage*/
+ /* Note: VS same for all lanes,
+ * so comparing first lane is sufficient*/
+ if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
+ req_settings.lane_settings[0].VOLTAGE_SWING)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ update_drive_settings(lt_settings, req_settings);
+
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ dm_logger_write(link->ctx->logger, LOG_ERROR,
+			"%s: Link Training Error, could not "
+			"get CR after %d tries. "
+			"Possibly a voltage swing issue", __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ return false;
+}
+
+static inline bool perform_link_training_int(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ bool status)
+{
+ union lane_count_set lane_count_set = { {0} };
+ union dpcd_training_pattern dpcd_pattern = { {0} };
+
+ /* 3. set training not in progress*/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+ dpcd_set_training_pattern(link, dpcd_pattern);
+
+ /* 4. mainlink output idle pattern*/
+ dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ /*
+ * 5. post training adjust if required
+ * If the upstream DPTX and downstream DPRX both support TPS4,
+ * TPS4 must be used instead of POST_LT_ADJ_REQ.
+ */
+ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
+ get_supported_tp(link) == HW_DP_TRAINING_PATTERN_4)
+ return status;
+
+ if (status &&
+ perform_post_lt_adj_req_sequence(link, lt_settings) == false)
+ status = false;
+
+ lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
+ lane_count_set.bits.ENHANCED_FRAMING = 1;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+ core_link_write_dpcd(
+ link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
+
+ return status;
+}
+
+enum link_training_result dc_link_dp_perform_link_training(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+
+ char *link_rate = "Unknown";
+ struct link_training_settings lt_settings;
+
+ memset(&lt_settings, '\0', sizeof(lt_settings));
+
+ lt_settings.link_settings.link_rate = link_setting->link_rate;
+ lt_settings.link_settings.lane_count = link_setting->lane_count;
+
+ /*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
+
+ /* TODO hard coded to SS for now
+ * lt_settings.link_settings.link_spread =
+ * dal_display_path_is_ss_supported(
+ * path_mode->display_path) ?
+ * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+ * LINK_SPREAD_DISABLED;
+ */
+ lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+ /* 1. set link rate, lane count and spread*/
+ dpcd_set_link_settings(link, &lt_settings);
+
+ /* 2. perform link training (set link training done
+ * to false is done as well)*/
+ if (!perform_clock_recovery_sequence(link, &lt_settings)) {
+ status = LINK_TRAINING_CR_FAIL;
+ } else {
+ status = perform_channel_equalization_sequence(link,
+ &lt_settings);
+ }
+
+ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
+ if (!perform_link_training_int(link,
+ &lt_settings,
+ status == LINK_TRAINING_SUCCESS)) {
+ /* the next link training setting in this case
+ * would be the same as CR failure case.
+ */
+ status = LINK_TRAINING_CR_FAIL;
+ }
+ }
+
+ /* 6. print status message*/
+ switch (lt_settings.link_settings.link_rate) {
+
+ case LINK_RATE_LOW:
+ link_rate = "RBR";
+ break;
+ case LINK_RATE_HIGH:
+ link_rate = "HBR";
+ break;
+ case LINK_RATE_HIGH2:
+ link_rate = "HBR2";
+ break;
+ case LINK_RATE_RBR2:
+ link_rate = "RBR2";
+ break;
+ case LINK_RATE_HIGH3:
+ link_rate = "HBR3";
+ break;
+ default:
+ break;
+ }
+
+ /* Connectivity log: link training */
+ CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d",
+ link_rate,
+ lt_settings.link_settings.lane_count,
+ (status == LINK_TRAINING_SUCCESS) ? "pass" :
+ ((status == LINK_TRAINING_CR_FAIL) ? "CR failed" :
+ "EQ failed"),
+ lt_settings.lane_settings[0].VOLTAGE_SWING,
+ lt_settings.lane_settings[0].PRE_EMPHASIS);
+
+ return status;
+}
+
+
+bool perform_link_training_with_retries(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts)
+{
+ uint8_t j;
+ uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+
+ for (j = 0; j < attempts; ++j) {
+
+ if (dc_link_dp_perform_link_training(
+ link,
+ link_setting,
+ skip_video_pattern) == LINK_TRAINING_SUCCESS)
+ return true;
+
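+		/* linear back-off: wait LINK_TRAINING_RETRY_DELAY ms after the
+		 * first failure, then 2x, 3x, ... before subsequent attempts
+		 */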
+ msleep(delay_between_attempts);
+ delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+ }
+
+ return false;
+}
+
+static struct dc_link_settings get_max_link_cap(struct dc_link *link)
+{
+ /* Set Default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ};
+
+ /* Higher link settings based on feature supported */
+ if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+ if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ /* Lower link settings based on sink's link cap */
+ if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count =
+ link->reported_link_cap.lane_count;
+ if (link->reported_link_cap.link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate =
+ link->reported_link_cap.link_rate;
+ if (link->reported_link_cap.link_spread <
+ max_link_cap.link_spread)
+ max_link_cap.link_spread =
+ link->reported_link_cap.link_spread;
+ return max_link_cap;
+}
+
+bool dp_hbr_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting)
+{
+ struct dc_link_settings max_link_cap = {0};
+ struct dc_link_settings cur_link_setting = {0};
+ struct dc_link_settings *cur = &cur_link_setting;
+ struct dc_link_settings initial_link_settings = {0};
+ bool success;
+ bool skip_link_training;
+ bool skip_video_pattern;
+ struct clock_source *dp_cs;
+ enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
+ enum link_training_result status;
+
+ success = false;
+ skip_link_training = false;
+
+ max_link_cap = get_max_link_cap(link);
+
+ /* TODO implement override and monitor patch later */
+
+ /* try to train the link from high to low to
+ * find the physical link capability
+ */
+ /* disable the PHY in case BIOS already enabled it; the driver will enable it itself */
+ dp_disable_link_phy(link, link->connector_signal);
+
+ dp_cs = link->dc->res_pool->dp_clock_source;
+
+ if (dp_cs)
+ dp_cs_id = dp_cs->id;
+ else {
+ /*
+ * dp clock source is not initialized for some reason.
+ * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
+ */
+ ASSERT(dp_cs);
+ }
+
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+ cur_link_setting = initial_link_settings;
+ do {
+ skip_video_pattern = true;
+
+ if (cur->link_rate == LINK_RATE_LOW)
+ skip_video_pattern = false;
+
+ dp_enable_link_phy(
+ link,
+ link->connector_signal,
+ dp_cs_id,
+ cur);
+
+ if (skip_link_training)
+ success = true;
+ else {
+ status = dc_link_dp_perform_link_training(
+ link,
+ cur,
+ skip_video_pattern);
+ if (status == LINK_TRAINING_SUCCESS)
+ success = true;
+ }
+
+ if (success)
+ link->verified_link_cap = *cur;
+
+ /* always disable the link before trying another
+ * setting or before returning; we'll enable it later
+ * based on the actual mode we're driving
+ */
+ dp_disable_link_phy(link, link->connector_signal);
+ } while (!success && decide_fallback_link_setting(
+ initial_link_settings, cur, status));
+
+ /* Link Training failed for all Link Settings
+ * (Lane Count is still unknown)
+ */
+ if (!success) {
+ /* If LT fails for all settings,
+ * set verified = fail-safe (1 lane, low rate)
+ */
+ link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+ link->verified_link_cap.link_rate = LINK_RATE_LOW;
+
+ link->verified_link_cap.link_spread =
+ LINK_SPREAD_DISABLED;
+ }
+
+
+ return success;
+}
+
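+/* Intersect two link settings: take the lower lane count and the lower
+ * link rate of the two, then snap a non-standard rate down to the nearest
+ * standard rate (see the DPR-120 note below).
+ */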
+static struct dc_link_settings get_common_supported_link_settings (
+ struct dc_link_settings link_setting_a,
+ struct dc_link_settings link_setting_b)
+{
+ struct dc_link_settings link_settings = {0};
+
+ link_settings.lane_count =
+ (link_setting_a.lane_count <=
+ link_setting_b.lane_count) ?
+ link_setting_a.lane_count :
+ link_setting_b.lane_count;
+ link_settings.link_rate =
+ (link_setting_a.link_rate <=
+ link_setting_b.link_rate) ?
+ link_setting_a.link_rate :
+ link_setting_b.link_rate;
+ link_settings.link_spread = LINK_SPREAD_DISABLED;
+
+ /* in DP compliance test, DPR-120 may have
+ * a random value in its MAX_LINK_BW dpcd field.
+ * We map it to the maximum supported link rate that
+ * is smaller than MAX_LINK_BW in this case.
+ */
+ if (link_settings.link_rate > LINK_RATE_HIGH3) {
+ link_settings.link_rate = LINK_RATE_HIGH3;
+ } else if (link_settings.link_rate < LINK_RATE_HIGH3
+ && link_settings.link_rate > LINK_RATE_HIGH2) {
+ link_settings.link_rate = LINK_RATE_HIGH2;
+ } else if (link_settings.link_rate < LINK_RATE_HIGH2
+ && link_settings.link_rate > LINK_RATE_HIGH) {
+ link_settings.link_rate = LINK_RATE_HIGH;
+ } else if (link_settings.link_rate < LINK_RATE_HIGH
+ && link_settings.link_rate > LINK_RATE_LOW) {
+ link_settings.link_rate = LINK_RATE_LOW;
+ } else if (link_settings.link_rate < LINK_RATE_LOW) {
+ link_settings.link_rate = LINK_RATE_UNKNOWN;
+ }
+
+ return link_settings;
+}
+
+static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
+{
+ return lane_count <= LANE_COUNT_ONE;
+}
+
+static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
+{
+ return link_rate <= LINK_RATE_LOW;
+}
+
+static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_FOUR:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_ONE;
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_UNKNOWN;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_HIGH3:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_LOW;
+ case LINK_RATE_LOW:
+ return LINK_RATE_UNKNOWN;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_FOUR;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_LOW:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH3;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+/*
+ * function: set link rate and lane count fallback based
+ * on current link setting and last link training result
+ * return value:
+ * true - link setting could be set
+ * false - has reached minimum setting
+ * and no further fallback could be done
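+ * e.g. with an initial setting of HBR2 x4 and repeated CR failures,
+ * the sequence tried is HBR x4, RBR x4, HBR2 x2, HBR x2, RBR x2,
+ * HBR2 x1, HBR x1, RBR x1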
+ */
+static bool decide_fallback_link_setting(
+ struct dc_link_settings initial_link_settings,
+ struct dc_link_settings *current_link_setting,
+ enum link_training_result training_result)
+{
+ if (!current_link_setting)
+ return false;
+
+ switch (training_result) {
+ case LINK_TRAINING_CR_FAIL:
+ {
+ if (!reached_minimum_link_rate
+ (current_link_setting->link_rate)) {
+ current_link_setting->link_rate =
+ reduce_link_rate(
+ current_link_setting->link_rate);
+ } else if (!reached_minimum_lane_count
+ (current_link_setting->lane_count)) {
+ current_link_setting->link_rate =
+ initial_link_settings.link_rate;
+ current_link_setting->lane_count =
+ reduce_lane_count(
+ current_link_setting->lane_count);
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_EQ:
+ {
+ if (!reached_minimum_lane_count
+ (current_link_setting->lane_count)) {
+ current_link_setting->lane_count =
+ reduce_lane_count(
+ current_link_setting->lane_count);
+ } else if (!reached_minimum_link_rate
+ (current_link_setting->link_rate)) {
+ current_link_setting->link_rate =
+ reduce_link_rate(
+ current_link_setting->link_rate);
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_CR:
+ {
+ if (!reached_minimum_link_rate
+ (current_link_setting->link_rate)) {
+ current_link_setting->link_rate =
+ reduce_link_rate(
+ current_link_setting->link_rate);
+ } else {
+ return false;
+ }
+ break;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+
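+/* Required stream bandwidth = pixel clock * bits per component * number of
+ * components (3 for RGB/YCbCr, 1 for Y-only). For example, a 148500 kHz
+ * (1080p60) 8 bpc RGB timing needs 148500 * 8 * 3 = 3,564,000 kbps.
+ */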
+static uint32_t bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+ switch (timing->display_color_depth) {
+
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_khz;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1)
+ /* Y-only needs only 1/3 the bandwidth of RGB (one component instead of three) */
+ kbps *= 3;
+
+ return kbps;
+
+}
+
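+/* Available link bandwidth = link rate * lane count * 8 bits per byte,
+ * where the link rate value is in units of LINK_RATE_REF_FREQ_IN_KHZ
+ * (assumed to be 27000, i.e. 0.27 GHz per unit). For example, HBR2 (0x14)
+ * on 4 lanes gives 20 * 27000 * 4 * 8 = 17,280,000 kbps of payload bandwidth.
+ */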
+static uint32_t bandwidth_in_kbps_from_link_settings(
+ const struct dc_link_settings *link_setting)
+{
+ uint32_t link_rate_in_kbps = link_setting->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+
+ uint32_t lane_count = link_setting->lane_count;
+ uint32_t kbps = link_rate_in_kbps;
+ kbps *= lane_count;
+ kbps *= 8; /* 8 bits per byte*/
+
+ return kbps;
+
+}
+
+bool dp_validate_mode_timing(
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t req_bw;
+ uint32_t max_bw;
+
+ const struct dc_link_settings *link_setting;
+
+ /* always allow the DP fail-safe mode (640x480 @ 25.175 MHz) */
+ if (timing->pix_clk_khz == (uint32_t)25175 &&
+ timing->h_addressable == (uint32_t)640 &&
+ timing->v_addressable == (uint32_t)480)
+ return true;
+
+ /* We always use verified link settings */
+ link_setting = &link->verified_link_cap;
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /*if (flags.DYNAMIC_VALIDATION == 1 &&
+ link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
+ link_setting = &link->verified_link_cap;
+ */
+
+ req_bw = bandwidth_in_kbps_from_timing(timing);
+ max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+
+ if (req_bw <= max_bw) {
+ /* remember the biggest mode here: during
+ * initial link training (to get
+ * verified_link_cap), LS sends an event to the upper
+ * layer when it cannot train at the reported cap, and
+ * the upper layer will re-enumerate modes.
+ * This is not necessary if the lower
+ * verified_link_cap is enough to drive
+ * all the modes */
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /* if (flags.DYNAMIC_VALIDATION == 1)
+ dpsst->max_req_bw_for_verified_linkcap = dal_max(
+ dpsst->max_req_bw_for_verified_linkcap, req_bw); */
+ return true;
+ } else
+ return false;
+}
+
+void decide_link_settings(struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting)
+{
+
+ struct dc_link_settings initial_link_setting = {
+ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED};
+ struct dc_link_settings current_link_setting =
+ initial_link_setting;
+ struct dc_link *link;
+ uint32_t req_bw;
+ uint32_t link_bw;
+
+ req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
+
+ link = stream->sink->link;
+
+ /* if preferred is specified through AMDDP, use it, if it's enough
+ * to drive the mode
+ */
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ *link_setting = link->preferred_link_setting;
+ return;
+ }
+
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
+ if (is_mst_supported(link)) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
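+ * e.g. for a 4-lane capable link the candidates are tried as
+ * RBR x1, RBR x2, RBR x4, HBR x1, ... up to verified_link_cap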
+ */
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = bandwidth_in_kbps_from_link_settings(
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return;
+ }
+
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+
+ BREAK_TO_DEBUGGER();
+ ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);
+
+ *link_setting = link->verified_link_cap;
+}
+
+/*************************Short Pulse IRQ***************************/
+
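+/* Returns true when the short-pulse IRQ data indicates the link needs
+ * retraining: a lane lost CR, EQ or symbol lock (or interlane align was
+ * lost) while the sink reports it is still in D0.
+ */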
+static bool hpd_rx_irq_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ uint8_t irq_reg_rx_power_state = 0;
+ enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+ union lane_status lane_status;
+ uint32_t lane;
+ bool sink_status_changed;
+ bool return_code;
+
+ sink_status_changed = false;
+ return_code = false;
+
+ if (link->cur_link_settings.lane_count == 0)
+ return return_code;
+
+ /*1. Check that Link Status changed, before re-training.*/
+
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /* check per-lane status from DpcdAddress_Lane01Status
+ * (0x202); each byte packs two lanes, one nibble per lane
+ */
+ lane_status.raw = get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* if channel equalization, clock recovery or
+ * symbol lock is lost, consider the link dropped:
+ * the DP sink status has changed
+ */
+ sink_status_changed = true;
+ break;
+ }
+ }
+
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: Link Status changed.\n", __func__);
+
+ return_code = true;
+
+ /*2. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+
+ if (dpcd_result != DC_OK) {
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: DPCD read failed to obtain power state.\n",
+ __func__);
+ } else {
+ if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+ return_code = false;
+ }
+ }
+
+ return return_code;
+}
+
+static enum dc_status read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ /* The HW reads 16 bytes from 200h on HPD,
+ * but if we get an AUX_DEFER, the HW cannot retry
+ * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+ * fail, so we now explicitly read 6 bytes, which is
+ * what those test cases require.
+ */
+ return core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ irq_data->raw,
+ sizeof(union hpd_irq_data));
+}
+
+static bool allow_hpd_rx_irq(const struct dc_link *link)
+{
+ /*
+ * Don't handle RX IRQ unless one of following is met:
+ * 1) The link is established (cur_link_settings != unknown)
+ * 2) We kicked off MST detection
+ * 3) We know we're dealing with an active dongle
+ */
+
+ if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+ (link->type == dc_connection_mst_branch) ||
+ is_dp_active_dongle(link))
+ return true;
+
+ return false;
+}
+
+static bool handle_hpd_irq_psr_sink(const struct dc_link *link)
+{
+ union dpcd_psr_configuration psr_configuration;
+
+ if (!link->psr_enabled)
+ return false;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 368,/*DpcdAddress_PSR_Enable_Cfg*/
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+
+ if (psr_configuration.bits.ENABLE) {
+ unsigned char dpcdbuf[3] = {0};
+ union psr_error_status psr_error_status;
+ union psr_sink_psr_status psr_sink_psr_status;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 0x2006, /*DpcdAddress_PSR_Error_Status*/
+ (unsigned char *) dpcdbuf,
+ sizeof(dpcdbuf));
+
+ /*DPCD 2006h ERROR STATUS*/
+ psr_error_status.raw = dpcdbuf[0];
+ /*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
+ psr_sink_psr_status.raw = dpcdbuf[2];
+
+ if (psr_error_status.bits.LINK_CRC_ERROR ||
+ psr_error_status.bits.RFB_STORAGE_ERROR) {
+ /* Acknowledge and clear error bits */
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 8198,/*DpcdAddress_PSR_Error_Status*/
+ &psr_error_status.raw,
+ sizeof(psr_error_status.raw));
+
+ /* PSR error, disable and re-enable PSR */
+ dc_link_set_psr_enable(link, false, true);
+ dc_link_set_psr_enable(link, true, true);
+
+ return true;
+ } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
+ PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
+ /* No error is detected; PSR is active.
+ * We should return with IRQ_HPD handled, without
+ * checking for loss of sync, since PSR would have
+ * powered down the main link.
+ */
+ return true;
+ }
+ }
+ return false;
+}
+
+static void dp_test_send_link_training(struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LANE_COUNT,
+ (unsigned char *)(&link_settings.lane_count),
+ 1);
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LINK_RATE,
+ (unsigned char *)(&link_settings.link_rate),
+ 1);
+
+ /* Set preferred link settings */
+ link->verified_link_cap.lane_count = link_settings.lane_count;
+ link->verified_link_cap.link_rate = link_settings.link_rate;
+
+ dp_retrain_link_dp_test(link, &link_settings, false);
+}
+
+/* TODO hbr2 compliance eye output is unstable
+ * (toggling on and off) with debugger break
+ * This causes intermittent PHY automation failures
+ * Need to look into the root cause */
+static uint8_t force_tps4_for_cp2520 = 1;
+
+static void dp_test_send_phy_test_pattern(struct dc_link *link)
+{
+ union phy_test_pattern dpcd_test_pattern;
+ union lane_adjust dpcd_lane_adjustment[2];
+ unsigned char dpcd_post_cursor_2_adjustment = 0;
+ unsigned char test_80_bit_pattern[
+ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+ enum dp_test_pattern test_pattern;
+ struct dc_link_training_settings link_settings;
+ union lane_adjust dpcd_lane_adjust;
+ unsigned int lane;
+ struct link_training_settings link_training_settings;
+ int i = 0;
+
+ dpcd_test_pattern.raw = 0;
+ memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
+ memset(&link_settings, 0, sizeof(link_settings));
+
+ /* get phy test pattern and pattern parameters from DP receiver */
+ core_link_read_dpcd(
+ link,
+ DP_TEST_PHY_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_LANE0_1,
+ &dpcd_lane_adjustment[0].raw,
+ sizeof(dpcd_lane_adjustment));
+
+ /* get post-cursor2 parameters
+ * For DP 1.1a or earlier, this DPCD register's value is 0
+ * For DP 1.2 or later:
+ * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
+ * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
+ */
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_POST_CURSOR2,
+ &dpcd_post_cursor_2_adjustment,
+ sizeof(dpcd_post_cursor_2_adjustment));
+
+ /* translate request */
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case PHY_TEST_PATTERN_D10_2:
+ test_pattern = DP_TEST_PATTERN_D102;
+ break;
+ case PHY_TEST_PATTERN_SYMBOL_ERROR:
+ test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case PHY_TEST_PATTERN_PRBS7:
+ test_pattern = DP_TEST_PATTERN_PRBS7;
+ break;
+ case PHY_TEST_PATTERN_80BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_CP2520_1:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (force_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_2:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (force_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM)
+ core_link_read_dpcd(
+ link,
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ test_80_bit_pattern,
+ sizeof(test_80_bit_pattern));
+
+ /* prepare link training settings */
+ link_settings.link = link->cur_link_settings;
+
+ for (lane = 0; lane <
+ (unsigned int)(link->cur_link_settings.lane_count);
+ lane++) {
+ dpcd_lane_adjust.raw =
+ get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
+ link_settings.lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)
+ (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+ link_settings.lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)
+ (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+ link_settings.lane_settings[lane].POST_CURSOR2 =
+ (enum dc_post_cursor2)
+ ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+ }
+
+ for (i = 0; i < 4; i++)
+ link_training_settings.lane_settings[i] =
+ link_settings.lane_settings[i];
+ link_training_settings.link_settings = link_settings.link;
+ link_training_settings.allow_invalid_msa_timing_param = false;
+ /* Usage: DP SI test equipment measures the DP physical
+ * lane signal automatically. The PHY test pattern request is
+ * generated by the equipment via an HPD interrupt, so HPD must
+ * stay active the whole time; do not touch it.
+ * Forward the request to the DS.
+ */
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ &link_training_settings,
+ test_80_bit_pattern,
+ (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1);
+}
+
+static void dp_test_send_link_test_pattern(struct dc_link *link)
+{
+ union link_test_pattern dpcd_test_pattern;
+ union test_misc dpcd_test_params;
+ enum dp_test_pattern test_pattern;
+
+ memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
+ memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
+
+ /* get link test pattern and pattern parameters */
+ core_link_read_dpcd(
+ link,
+ DP_TEST_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_TEST_MISC0,
+ &dpcd_test_params.raw,
+ sizeof(dpcd_test_params));
+
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case LINK_TEST_PATTERN_COLOR_RAMP:
+ test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
+ break;
+ case LINK_TEST_PATTERN_VERTICAL_BARS:
+ test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
+ break; /* black and white */
+ case LINK_TEST_PATTERN_COLOR_SQUARES:
+ test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
+ TEST_DYN_RANGE_VESA ?
+ DP_TEST_PATTERN_COLOR_SQUARES :
+ DP_TEST_PATTERN_COLOR_SQUARES_CEA);
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ NULL,
+ NULL,
+ 0);
+}
+
+static void handle_automated_test(struct dc_link *link)
+{
+ union test_request test_request;
+ union test_response test_response;
+
+ memset(&test_request, 0, sizeof(test_request));
+ memset(&test_response, 0, sizeof(test_response));
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+ if (test_request.bits.LINK_TRAINING) {
+ /* ACK first to let DP RX test box monitor LT sequence */
+ test_response.bits.ACK = 1;
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ dp_test_send_link_training(link);
+ /* no further acknowledgement is needed */
+ test_response.bits.ACK = 0;
+ }
+ if (test_request.bits.LINK_TEST_PATTRN) {
+ dp_test_send_link_test_pattern(link);
+ test_response.bits.ACK = 1;
+ }
+ if (test_request.bits.PHY_TEST_PATTERN) {
+ dp_test_send_phy_test_pattern(link);
+ test_response.bits.ACK = 1;
+ }
+ if (!test_request.raw)
+ /* no requests, revert all test signals
+ * TODO: revert all test signals
+ */
+ test_response.bits.ACK = 1;
+ /* send request acknowledgment */
+ if (test_response.bits.ACK)
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data)
+{
+ union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+ union device_service_irq device_service_clear = { { 0 } };
+ enum dc_status result = DDC_RESULT_UNKNOWN;
+ bool status = false;
+ /* For use cases related to downstream connection status change,
+ * PSR and device auto test, refer to function handle_sst_hpd_irq
+ * in DAL2.1*/
+
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: Got short pulse HPD on link %d\n",
+ __func__, link->link_index);
+
+
+ /* All the "handle_hpd_irq_xxx()" methods
+ * should be called only after
+ * dal_dpsst_ls_read_hpd_irq_data
+ * Order of calls is important too
+ */
+ result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+ if (out_hpd_irq_dpcd_data)
+ *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
+
+ if (result != DC_OK) {
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: DPCD read failed to obtain irq data\n",
+ __func__);
+ return false;
+ }
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ device_service_clear.bits.AUTOMATED_TEST = 1;
+ core_link_write_dpcd(
+ link,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &device_service_clear.raw,
+ sizeof(device_service_clear.raw));
+ device_service_clear.raw = 0;
+ handle_automated_test(link);
+ return false;
+ }
+
+ if (!allow_hpd_rx_irq(link)) {
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: skipping HPD handling on %d\n",
+ __func__, link->link_index);
+ return false;
+ }
+
+ if (handle_hpd_irq_psr_sink(link))
+ /* PSR-related error was detected and handled */
+ return true;
+
+ /* If PSR-related error handled, Main link may be off,
+ * so do not handle as a normal sink status change interrupt.
+ */
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY)
+ return true;
+
+ /* check if we have MST msg and return since we poll for it */
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY)
+ return false;
+
+ /* For now we only handle the 'Downstream port status' case.
+ * If the sink count changed, the downstream port status
+ * changed, and DM should call DC to run detection. */
+ if (hpd_rx_irq_check_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
+ /* Connectivity log: link loss */
+ CONN_DATA_LINK_LOSS(link,
+ hpd_irq_dpcd_data.raw,
+ sizeof(hpd_irq_dpcd_data),
+ "Status: ");
+
+ perform_link_training_with_retries(link,
+ &link->cur_link_settings,
+ true, LINK_TRAINING_ATTEMPTS);
+
+ status = false;
+ }
+
+ if (link->type == dc_connection_active_dongle &&
+ hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
+ != link->dpcd_sink_count)
+ status = true;
+
+ /* reasons for HPD RX:
+ * 1. Link Loss - ie Re-train the Link
+ * 2. MST sideband message
+ * 3. Automated Test - ie. Internal Commit
+ * 4. CP (copy protection) - (not interesting for DM???)
+ * 5. DRR
+ * 6. Downstream Port status changed
+ * -ie. Detect - this is the only one
+ * which is interesting for DM because
+ * it must call dc_link_detect.
+ */
+ return status;
+}
+
+/* query DPCD for the DPCD revision and MST capability */
+bool is_mst_supported(struct dc_link *link)
+{
+ bool mst = false;
+ enum dc_status st = DC_OK;
+ union dpcd_rev rev;
+ union mstm_cap cap;
+
+ rev.raw = 0;
+ cap.raw = 0;
+
+ st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
+ sizeof(rev));
+
+ if (st == DC_OK && rev.raw >= DPCD_REV_12) {
+
+ st = core_link_read_dpcd(link, DP_MSTM_CAP,
+ &cap.raw, sizeof(cap));
+ if (st == DC_OK && cap.bits.MST_CAP == 1)
+ mst = true;
+ }
+ return mst;
+
+}
+
+bool is_dp_active_dongle(const struct dc_link *link)
+{
+ enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
+
+ return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
+ (dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
+ (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+}
+
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+ switch (bpc) {
+ case DOWN_STREAM_MAX_8BPC:
+ return 8;
+ case DOWN_STREAM_MAX_10BPC:
+ return 10;
+ case DOWN_STREAM_MAX_12BPC:
+ return 12;
+ case DOWN_STREAM_MAX_16BPC:
+ return 16;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static void get_active_converter_info(
+ uint8_t data, struct dc_link *link)
+{
+ union dp_downstream_port_present ds_port = { .byte = data };
+
+ /* decode converter info*/
+ if (!ds_port.fields.PORT_PRESENT) {
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ ddc_service_set_dongle_type(link->ddc,
+ link->dpcd_caps.dongle_type);
+ return;
+ }
+
+ switch (ds_port.fields.PORT_TYPE) {
+ case DOWNSTREAM_VGA:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWNSTREAM_DVI_HDMI:
+ /* At this point we don't know whether it is DVI or HDMI;
+ * assume DVI. */
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ default:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ break;
+ }
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
+ uint8_t det_caps[4];
+ union dwnstream_port_caps_byte0 *port_caps =
+ (union dwnstream_port_caps_byte0 *)det_caps;
+ core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
+ det_caps, sizeof(det_caps));
+
+ switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+ case DOWN_STREAM_DETAILED_VGA:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_DVI:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_HDMI:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER;
+
+ link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
+ if (ds_port.fields.DETAILED_CAPS) {
+
+ union dwnstream_port_caps_byte3_hdmi
+ hdmi_caps = {.raw = det_caps[3] };
+ union dwnstream_port_caps_byte1
+ hdmi_color_caps = {.raw = det_caps[2] };
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
+ det_caps[1] * 25000;
+
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
+ hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+ hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+ hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
+ hdmi_caps.bits.YCrCr422_CONVERSION;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
+ hdmi_caps.bits.YCrCr420_CONVERSION;
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
+ translate_dpcd_max_bpc(
+ hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
+
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+
+ break;
+ }
+ }
+
+ ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
+
+ {
+ struct dp_device_vendor_id dp_id;
+
+ /* read IEEE branch device id */
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_OUI,
+ (uint8_t *)&dp_id,
+ sizeof(dp_id));
+
+ link->dpcd_caps.branch_dev_id =
+ (dp_id.ieee_oui[0] << 16) +
+ (dp_id.ieee_oui[1] << 8) +
+ dp_id.ieee_oui[2];
+
+ memmove(
+ link->dpcd_caps.branch_dev_name,
+ dp_id.ieee_device_id,
+ sizeof(dp_id.ieee_device_id));
+ }
+
+ {
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.branch_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+ }
+}
+
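+/* Workaround (0010FA): some sinks and dongles need the receiver powered up
+ * before DPCD can be read; retry the DPCD_REV read a few times with RX
+ * power on, then flag dongles that must stay powered.
+ */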
+static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
+ int length)
+{
+ int retry = 0;
+ union dp_downstream_port_present ds_port = { 0 };
+
+ if (!link->dpcd_caps.dpcd_rev.raw) {
+ do {
+ dp_receiver_power_ctrl(link, true);
+ core_link_read_dpcd(link, DP_DPCD_REV,
+ dpcd_data, length);
+ link->dpcd_caps.dpcd_rev.raw = dpcd_data[
+ DP_DPCD_REV -
+ DP_DPCD_REV];
+ } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
+ }
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
+ switch (link->dpcd_caps.branch_dev_id) {
+ /* Some active dongles (DP-VGA, DP-DLDVI converters) power down
+ * all internal circuits, including AUX communication, which
+ * prevents reading the DPCD table and EDID (a spec violation).
+ * The encoder will skip DP RX power down on disable_output to
+ * keep the receiver powered all the time. */
+ case DP_BRANCH_DEVICE_ID_1:
+ case DP_BRANCH_DEVICE_ID_4:
+ link->wa_flags.dp_keep_receiver_powered = true;
+ break;
+
+ /* TODO: May need work around for other dongles. */
+ default:
+ link->wa_flags.dp_keep_receiver_powered = false;
+ break;
+ }
+ } else
+ link->wa_flags.dp_keep_receiver_powered = false;
+}
+
+static void retrieve_link_cap(struct dc_link *link)
+{
+ uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1];
+
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+ union dp_downstream_port_present ds_port = { 0 };
+
+ memset(dpcd_data, '\0', sizeof(dpcd_data));
+ memset(&down_strm_port_count,
+ '\0', sizeof(union down_stream_port_count));
+ memset(&edp_config_cap, '\0',
+ sizeof(union edp_configuration_cap));
+
+ core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+
+ {
+ union training_aux_rd_interval aux_rd_interval;
+
+ aux_rd_interval.raw =
+ dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
+
+ if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
+ core_link_read_dpcd(
+ link,
+ DP_DP13_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ }
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ get_active_converter_info(ds_port.byte, link);
+
+ dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
+
+ down_strm_port_count.raw = dpcd_data[
+ DP_DOWN_STREAM_PORT_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = dpcd_data[
+ DP_MAX_LINK_RATE - DP_DPCD_REV];
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+ link->test_pattern_enabled = false;
+ link->compliance_test_state.raw = 0;
+
+ /* read sink count */
+ core_link_read_dpcd(link,
+ DP_SINK_COUNT,
+ &link->dpcd_caps.sink_count.raw,
+ sizeof(link->dpcd_caps.sink_count.raw));
+
+ /* Connectivity log: detection */
+ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+}
+
+void detect_dp_sink_caps(struct dc_link *link)
+{
+ retrieve_link_cap(link);
+
+ /* dc init_hw has powered the encoder using the connector's
+ * default signal. For native DP there is no need to power up
+ * the encoder again. If it is not native DP, hw_init may need
+ * to check the signal or power up the encoder here.
+ */
+ /* TODO save sink caps in link->sink */
+}
+
+void detect_edp_sink_caps(struct dc_link *link)
+{
+ retrieve_link_cap(link);
+ link->verified_link_cap = link->reported_link_cap;
+}
+
+void dc_link_dp_enable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+ encoder->funcs->enable_hpd(encoder);
+}
+
+void dc_link_dp_disable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+ encoder->funcs->disable_hpd(encoder);
+}
+
+static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
+{
+ if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
+ test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
+ test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
+ return true;
+ else
+ return false;
+}
+
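+/* Program the CRTC/OPP to generate the requested test pattern; color test
+ * patterns disable bit depth reduction, and VIDEO_MODE restores it.
+ */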
+static void set_crtc_test_pattern(struct dc_link *link,
+ struct pipe_ctx *pipe_ctx,
+ enum dp_test_pattern test_pattern)
+{
+ enum controller_dp_test_pattern controller_test_pattern;
+ enum dc_color_depth color_depth = pipe_ctx->
+ stream->timing.display_color_depth;
+ struct bit_depth_reduction_params params;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ break;
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
+ break;
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
+ break;
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
+ break;
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
+ break;
+ default:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ break;
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ {
+ /* disable bit depth reduction */
+ pipe_ctx->stream->bit_depth_params = params;
+ pipe_ctx->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ controller_test_pattern, color_depth);
+ }
+ break;
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ {
+ /* restore bitdepth reduction */
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &params);
+ pipe_ctx->stream->bit_depth_params = params;
+ pipe_ctx->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &pipes[0];
+ unsigned int lane;
+ unsigned int i;
+ unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
+ union dpcd_training_pattern training_pattern;
+ enum dpcd_phy_test_patterns pattern;
+
+ memset(&training_pattern, 0, sizeof(training_pattern));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream->sink->link == link) {
+ pipe_ctx = &pipes[i];
+ break;
+ }
+ }
+
+ /* Reset the CRTC test pattern if one is currently running and the
+ * request is VideoMode; likewise reset the DP PHY test pattern if it
+ * is currently running and the request is VideoMode
+ */
+ if (link->test_pattern_enabled && test_pattern ==
+ DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set CRTC Test Pattern */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ dp_set_hw_test_pattern(link, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ /* Unblank Stream */
+ link->dc->hwss.unblank_stream(
+ pipe_ctx,
+ &link->verified_link_cap);
+ /* TODO:m_pHwss->MuteAudioEndpoint
+ * (pPathMode->pDisplayPath, false);
+ */
+
+ /* Reset Test Pattern state */
+ link->test_pattern_enabled = false;
+
+ return true;
+ }
+
+ /* Check for PHY Test Patterns */
+ if (is_dp_phy_pattern(test_pattern)) {
+ /* Set DPCD Lane Settings before running test pattern */
+ if (p_link_settings != NULL) {
+ dp_set_hw_lane_settings(link, p_link_settings);
+ dpcd_set_lane_settings(link, p_link_settings);
+ }
+
+ /* Blank stream if running test pattern */
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /*TODO:
+ * m_pHwss->
+ * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
+ */
+ /* Blank stream */
+ pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+ }
+
+ dp_set_hw_test_pattern(link, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ if (p_link_settings != NULL)
+ dpcd_set_link_settings(link,
+ p_link_settings);
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ pattern = PHY_TEST_PATTERN_NONE;
+ break;
+ case DP_TEST_PATTERN_D102:
+ pattern = PHY_TEST_PATTERN_D10_2;
+ break;
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ pattern = PHY_TEST_PATTERN_PRBS7;
+ break;
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_CP2520_1:
+ pattern = PHY_TEST_PATTERN_CP2520_1;
+ break;
+ case DP_TEST_PATTERN_CP2520_2:
+ pattern = PHY_TEST_PATTERN_CP2520_2;
+ break;
+ case DP_TEST_PATTERN_CP2520_3:
+ pattern = PHY_TEST_PATTERN_CP2520_3;
+ break;
+ default:
+ return false;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
+ /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
+ return false;
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ /* tell the receiver that we are sending a qualification
+ * pattern (DP 1.2 or later) - the DP receiver's link quality
+ * pattern is set using the DPCD LINK_QUAL_LANEx_SET
+ * registers (0x10b - 0x10e)
+ */
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
+ link_qual_pattern[lane] =
+ (unsigned char)(pattern);
+
+ core_link_write_dpcd(link,
+ DP_LINK_QUAL_LANE0_SET,
+ link_qual_pattern,
+ sizeof(link_qual_pattern));
+ } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
+ link->dpcd_caps.dpcd_rev.raw == 0) {
+ /* tell receiver that we are sending qualification
+ * pattern DP 1.1a or earlier - DP receiver's link
+ * quality pattern is set using
+ * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
+ * register (0x102). We use the v1_3 DPCD layout when
+ * setting the test pattern for DP 1.1.
+ */
+ core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ }
+ } else {
+ /* CRTC Patterns */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern);
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ }
+
+ return true;
+}
+
+void dp_enable_mst_on_sink(struct dc_link *link, bool enable)
+{
+ unsigned char mstmCntl;
+
+ core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+ if (enable)
+ mstmCntl |= DP_MST_EN;
+ else
+ mstmCntl &= (~DP_MST_EN);
+
+ core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
new file mode 100644
index 000000000000..9a33b471270a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -0,0 +1,331 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+
+
+#include "dm_services.h"
+#include "dc.h"
+#include "inc/core_types.h"
+#include "include/ddc_service_types.h"
+#include "include/i2caux_interface.h"
+#include "link_hwss.h"
+#include "hw_sequencer.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dpcd_defs.h"
+
+enum dc_status core_link_read_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size)
+{
+ if (!dm_helpers_dp_read_dpcd(link->ctx,
+ link,
+ address, data, size))
+ return DC_ERROR_UNEXPECTED;
+
+ return DC_OK;
+}
+
+enum dc_status core_link_write_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size)
+{
+ if (!dm_helpers_dp_write_dpcd(link->ctx,
+ link,
+ address, data, size))
+ return DC_ERROR_UNEXPECTED;
+
+ return DC_OK;
+}
+
+void dp_receiver_power_ctrl(struct dc_link *link, bool on)
+{
+ uint8_t state;
+
+ state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+
+ core_link_write_dpcd(link, DP_SET_POWER, &state,
+ sizeof(state));
+}
+
+void dp_enable_link_phy(
+ struct dc_link *link,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ struct link_encoder *link_enc = link->link_enc;
+
+ struct pipe_ctx *pipes =
+ link->dc->current_state->res_ctx.pipe_ctx;
+ struct clock_source *dp_cs =
+ link->dc->res_pool->dp_clock_source;
+ unsigned int i;
+ /* If the current pixel clock source is not DTO (this happens after
+ * switching from HDMI passive dongle to DP on the same connector),
+ * switch the pixel clock source to DTO.
+ */
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->sink != NULL &&
+ pipes[i].stream->sink->link == link) {
+ if (pipes[i].clock_source != NULL &&
+ pipes[i].clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ pipes[i].clock_source = dp_cs;
+ pipes[i].stream_res.pix_clk_params.requested_pix_clk =
+ pipes[i].stream->timing.pix_clk_khz;
+ pipes[i].clock_source->funcs->program_pix_clk(
+ pipes[i].clock_source,
+ &pipes[i].stream_res.pix_clk_params,
+ &pipes[i].pll_settings);
+ }
+ }
+ }
+
+ if (dc_is_dp_sst_signal(signal)) {
+ if (signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_power_control(link->link_enc, true);
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ link->dc->hwss.edp_backlight_control(link, true);
+ } else
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ } else {
+ link_enc->funcs->enable_dp_mst_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ }
+
+ dp_receiver_power_ctrl(link, true);
+}
+
+static bool edp_receiver_ready_T9(struct dc_link *link)
+{
+ unsigned int tries = 0;
+ unsigned char sinkstatus = 0;
+ unsigned char edpRev = 0;
+ enum dc_status result = DC_OK;
+ result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+ if (edpRev < DP_EDP_12)
+ return true;
+ /* starting from eDP 1.2, SINK_STATUS indicates whether the sink is ready */
+ do {
+ sinkstatus = 1;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 0)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(100); /* max T9 */
+ } while (++tries < 50);
+ return result;
+}
+
+void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
+{
+ if (!link->wa_flags.dp_keep_receiver_powered)
+ dp_receiver_power_ctrl(link, false);
+
+ if (signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_backlight_control(link, false);
+ edp_receiver_ready_T9(link);
+ link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+ link->dc->hwss.edp_power_control(link->link_enc, false);
+ } else
+ link->link_enc->funcs->disable_output(link->link_enc, signal, link);
+
+ /* Clear current link setting.*/
+ memset(&link->cur_link_settings, 0,
+ sizeof(link->cur_link_settings));
+}
+
+void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)
+{
+ /* for MST, disable the link only when no stream uses it */
+ if (link->mst_stream_alloc_table.stream_count > 0)
+ return;
+
+ dp_disable_link_phy(link, signal);
+
+ /* set the sink to SST mode after disabling the link */
+ dp_enable_mst_on_sink(link, false);
+}
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ enum hw_dp_training_pattern pattern)
+{
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+
+ switch (pattern) {
+ case HW_DP_TRAINING_PATTERN_1:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
+ break;
+ case HW_DP_TRAINING_PATTERN_2:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
+ break;
+ case HW_DP_TRAINING_PATTERN_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
+ break;
+ case HW_DP_TRAINING_PATTERN_4:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ default:
+ break;
+ }
+
+ dp_set_hw_test_pattern(link, test_pattern, NULL, 0);
+
+ return true;
+}
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_settings)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ /* call Encoder to set lane settings */
+ encoder->funcs->dp_set_lane_settings(encoder, link_settings);
+}
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
+{
+ /* We need to explicitly check that the connector
+ * is not DP. Some Travis_VGA dongles get reported
+ * by the video BIOS as DP.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
+
+ switch (link->dpcd_caps.branch_dev_id) {
+ case DP_BRANCH_DEVICE_ID_2:
+ if (strncmp(
+ link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_2,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ case DP_BRANCH_DEVICE_ID_3:
+ if (strncmp(link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_3,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (link->dpcd_caps.panel_mode_edp) {
+ return DP_PANEL_MODE_EDP;
+ }
+
+ return DP_PANEL_MODE_DEFAULT;
+}
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size)
+{
+ struct encoder_set_dp_phy_pattern_param pattern_param = {0};
+ struct link_encoder *encoder = link->link_enc;
+
+ pattern_param.dp_phy_pattern = test_pattern;
+ pattern_param.custom_pattern = custom_pattern;
+ pattern_param.custom_pattern_size = custom_pattern_size;
+ pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+
+ encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+}
+
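+/* For DP compliance testing: for every pipe driven by this link, blank the
+ * stream, disable the output and any active test pattern, then re-enable
+ * the PHY, retrain with the requested settings and unblank the stream.
+ */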
+void dp_retrain_link_dp_test(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ struct pipe_ctx *pipes =
+ &link->dc->current_state->res_ctx.pipe_ctx[0];
+ unsigned int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream != NULL &&
+ pipes[i].stream->sink != NULL &&
+ pipes[i].stream->sink->link != NULL &&
+ pipes[i].stream_res.stream_enc != NULL &&
+ pipes[i].stream->sink->link == link) {
+ udelay(100);
+
+ pipes[i].stream_res.stream_enc->funcs->dp_blank(
+ pipes[i].stream_res.stream_enc);
+
+ /* disable any test pattern that might be active */
+ dp_set_hw_test_pattern(link,
+ DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ dp_receiver_power_ctrl(link, false);
+
+ link->dc->hwss.disable_stream(&pipes[i], KEEP_ACQUIRED_RESOURCE);
+
+ link->link_enc->funcs->disable_output(
+ link->link_enc,
+ SIGNAL_TYPE_DISPLAY_PORT,
+ link);
+
+ /* Clear current link setting. */
+ memset(&link->cur_link_settings, 0,
+ sizeof(link->cur_link_settings));
+
+ link->link_enc->funcs->enable_dp_output(
+ link->link_enc,
+ link_setting,
+ pipes[i].clock_source->id);
+
+ dp_receiver_power_ctrl(link, true);
+
+ perform_link_training_with_retries(
+ link,
+ link_setting,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS);
+
+ link->cur_link_settings = *link_setting;
+
+ link->dc->hwss.enable_stream(&pipes[i]);
+
+ link->dc->hwss.unblank_stream(&pipes[i],
+ link_setting);
+
+ if (pipes[i].stream_res.audio) {
+ /* notify the audio driver about
+ * the monitor's audio modes */
+ pipes[i].stream_res.audio->funcs->az_enable(
+ pipes[i].stream_res.audio);
+
+ /* un-mute audio */
+ /* TODO: audio should be per stream rather than
+ * per link */
+ pipes[i].stream_res.stream_enc->funcs->
+ audio_mute_control(
+ pipes[i].stream_res.stream_enc, false);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
new file mode 100644
index 000000000000..b7422d3b71ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -0,0 +1,2807 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "link_encoder.h"
+#include "stream_encoder.h"
+#include "opp.h"
+#include "timing_generator.h"
+#include "transform.h"
+#include "dpp.h"
+#include "core_types.h"
+#include "set_mode_types.h"
+#include "virtual/virtual_stream_encoder.h"
+
+#include "dce80/dce80_resource.h"
+#include "dce100/dce100_resource.h"
+#include "dce110/dce110_resource.h"
+#include "dce112/dce112_resource.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/dcn10_resource.h"
+#endif
+#include "dce120/dce120_resource.h"
+
+enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
+{
+ enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ switch (asic_id.chip_family) {
+
+ case FAMILY_CI:
+ dc_version = DCE_VERSION_8_0;
+ break;
+ case FAMILY_KV:
+ if (ASIC_REV_IS_KALINDI(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_BHAVANI(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_GODAVARI(asic_id.hw_internal_rev))
+ dc_version = DCE_VERSION_8_3;
+ else
+ dc_version = DCE_VERSION_8_1;
+ break;
+ case FAMILY_CZ:
+ dc_version = DCE_VERSION_11_0;
+ break;
+
+ case FAMILY_VI:
+ if (ASIC_REV_IS_TONGA_P(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_FIJI_P(asic_id.hw_internal_rev)) {
+ dc_version = DCE_VERSION_10_0;
+ break;
+ }
+ if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) {
+ dc_version = DCE_VERSION_11_2;
+ }
+ break;
+ case FAMILY_AI:
+ dc_version = DCE_VERSION_12_0;
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case FAMILY_RV:
+ dc_version = DCN_VERSION_1_0;
+ break;
+#endif
+ default:
+ dc_version = DCE_VERSION_UNKNOWN;
+ break;
+ }
+ return dc_version;
+}
+
+struct resource_pool *dc_create_resource_pool(
+ struct dc *dc,
+ int num_virtual_links,
+ enum dce_version dc_version,
+ struct hw_asic_id asic_id)
+{
+ struct resource_pool *res_pool = NULL;
+
+ switch (dc_version) {
+ case DCE_VERSION_8_0:
+ res_pool = dce80_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_8_1:
+ res_pool = dce81_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_8_3:
+ res_pool = dce83_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_10_0:
+ res_pool = dce100_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_11_0:
+ res_pool = dce110_create_resource_pool(
+ num_virtual_links, dc, asic_id);
+ break;
+ case DCE_VERSION_11_2:
+ res_pool = dce112_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+ case DCE_VERSION_12_0:
+ res_pool = dce120_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ res_pool = dcn10_create_resource_pool(
+ num_virtual_links, dc);
+ break;
+#endif
+
+
+ default:
+ break;
+ }
+ if (res_pool != NULL) {
+ struct dc_firmware_info fw_info = { { 0 } };
+
+ if (dc->ctx->dc_bios->funcs->get_firmware_info(
+ dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
+ res_pool->ref_clock_inKhz = fw_info.pll_info.crystal_frequency;
+ } else
+ ASSERT_CRITICAL(false);
+ }
+
+ return res_pool;
+}
+
+void dc_destroy_resource_pool(struct dc *dc)
+{
+ if (dc) {
+ if (dc->res_pool)
+ dc->res_pool->funcs->destroy(&dc->res_pool);
+
+ kfree(dc->hwseq);
+ }
+}
+
+static void update_num_audio(
+ const struct resource_straps *straps,
+ unsigned int *num_audio,
+ struct audio_support *aud_support)
+{
+ aud_support->dp_audio = true;
+ aud_support->hdmi_audio_native = false;
+ aud_support->hdmi_audio_on_dongle = false;
+
+ if (straps->hdmi_disable == 0) {
+ if (straps->dc_pinstraps_audio & 0x2) {
+ aud_support->hdmi_audio_on_dongle = true;
+ aud_support->hdmi_audio_native = true;
+ }
+ }
+
+ switch (straps->audio_stream_number) {
+ case 0: /* multi streams supported */
+ break;
+ case 1: /* multi streams not supported */
+ *num_audio = 1;
+ break;
+ default:
+ DC_ERR("DC: unexpected audio fuse!\n");
+ }
+}
+
+bool resource_construct(
+ unsigned int num_virtual_links,
+ struct dc *dc,
+ struct resource_pool *pool,
+ const struct resource_create_funcs *create_funcs)
+{
+ struct dc_context *ctx = dc->ctx;
+ const struct resource_caps *caps = pool->res_cap;
+ int i;
+ unsigned int num_audio = caps->num_audio;
+ struct resource_straps straps = {0};
+
+ if (create_funcs->read_dce_straps)
+ create_funcs->read_dce_straps(dc->ctx, &straps);
+
+ pool->audio_count = 0;
+ if (create_funcs->create_audio) {
+ /* find the total number of streams available via the
+ * AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+ * registers (one for each pin) starting from pin 1
+ * up to the max number of audio pins.
+ * We stop on the first pin where
+ * PORT_CONNECTIVITY == 1 (as instructed by HW team).
+ */
+ update_num_audio(&straps, &num_audio, &pool->audio_support);
+ for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+ struct audio *aud = create_funcs->create_audio(ctx, i);
+
+ if (aud == NULL) {
+ DC_ERR("DC: failed to create audio!\n");
+ return false;
+ }
+
+ if (!aud->funcs->endpoint_valid(aud)) {
+ aud->funcs->destroy(&aud);
+ break;
+ }
+
+ pool->audios[i] = aud;
+ pool->audio_count++;
+ }
+ }
+
+ pool->stream_enc_count = 0;
+ if (create_funcs->create_stream_encoder) {
+ for (i = 0; i < caps->num_stream_encoder; i++) {
+ pool->stream_enc[i] = create_funcs->create_stream_encoder(i, ctx);
+ if (pool->stream_enc[i] == NULL)
+ DC_ERR("DC: failed to create stream_encoder!\n");
+ pool->stream_enc_count++;
+ }
+ }
+ dc->caps.dynamic_audio = false;
+ if (pool->audio_count < pool->stream_enc_count) {
+ dc->caps.dynamic_audio = true;
+ }
+ for (i = 0; i < num_virtual_links; i++) {
+ pool->stream_enc[pool->stream_enc_count] =
+ virtual_stream_encoder_create(
+ ctx, ctx->dc_bios);
+ if (pool->stream_enc[pool->stream_enc_count] == NULL) {
+ DC_ERR("DC: failed to create stream_encoder!\n");
+ return false;
+ }
+ pool->stream_enc_count++;
+ }
+
+ dc->hwseq = create_funcs->create_hwseq(ctx);
+
+ return true;
+}
+
+void resource_unreference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] != clock_source)
+ continue;
+
+ res_ctx->clock_source_ref_count[i]--;
+
+ break;
+ }
+
+ if (pool->dp_clock_source == clock_source)
+ res_ctx->dp_clock_source_ref_count--;
+}
+
+void resource_reference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] != clock_source)
+ continue;
+
+ res_ctx->clock_source_ref_count[i]++;
+ break;
+ }
+
+ if (pool->dp_clock_source == clock_source)
+ res_ctx->dp_clock_source_ref_count++;
+}
+
+bool resource_are_streams_timing_synchronizable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2)
+{
+ if (stream1->timing.h_total != stream2->timing.h_total)
+ return false;
+
+ if (stream1->timing.v_total != stream2->timing.v_total)
+ return false;
+
+ if (stream1->timing.h_addressable
+ != stream2->timing.h_addressable)
+ return false;
+
+ if (stream1->timing.v_addressable
+ != stream2->timing.v_addressable)
+ return false;
+
+ if (stream1->timing.pix_clk_khz
+ != stream2->timing.pix_clk_khz)
+ return false;
+
+ if (stream1->phy_pix_clk != stream2->phy_pix_clk
+ && (!dc_is_dp_signal(stream1->signal)
+ || !dc_is_dp_signal(stream2->signal)))
+ return false;
+
+ return true;
+}
+
+static bool is_sharable_clk_src(
+ const struct pipe_ctx *pipe_with_clk_src,
+ const struct pipe_ctx *pipe)
+{
+ if (pipe_with_clk_src->clock_source == NULL)
+ return false;
+
+ if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ return false;
+
+ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
+ return false;
+
+ if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
+ && dc_is_dvi_signal(pipe->stream->signal))
+ return false;
+
+ if (dc_is_hdmi_signal(pipe->stream->signal)
+ && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+ return false;
+
+ if (!resource_are_streams_timing_synchronizable(
+ pipe_with_clk_src->stream, pipe->stream))
+ return false;
+
+ return true;
+}
+
+struct clock_source *resource_find_used_clk_src_for_sharing(
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx)
+{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (is_sharable_clk_src(&res_ctx->pipe_ctx[i], pipe_ctx))
+ return res_ctx->pipe_ctx[i].clock_source;
+ }
+
+ return NULL;
+}
+
+static enum pixel_format convert_pixel_format_to_dalsurface(
+ enum surface_pixel_format surface_pixel_format)
+{
+ enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+
+ switch (surface_pixel_format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ dal_pixel_format = PIXEL_FORMAT_INDEX8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ dal_pixel_format = PIXEL_FORMAT_RGB565;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ dal_pixel_format = PIXEL_FORMAT_RGB565;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ dal_pixel_format = PIXEL_FORMAT_ARGB2101010_XRBIAS;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ dal_pixel_format = PIXEL_FORMAT_FP16;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ dal_pixel_format = PIXEL_FORMAT_420BPP8;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ dal_pixel_format = PIXEL_FORMAT_420BPP10;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ default:
+ dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+ break;
+ }
+ return dal_pixel_format;
+}
+
+static void rect_swap_helper(struct rect *rect)
+{
+ uint32_t temp = 0;
+
+ temp = rect->height;
+ rect->height = rect->width;
+ rect->width = temp;
+
+ temp = rect->x;
+ rect->x = rect->y;
+ rect->y = temp;
+}
+
+static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect surf_src = plane_state->src_rect;
+ struct rect clip = { 0 };
+ int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ bool pri_split = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+
+ if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+ pri_split = false;
+ sec_split = false;
+ }
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+ /* The actual clip is an intersection between stream
+ * source and surface clip
+ */
+ clip.x = stream->src.x > plane_state->clip_rect.x ?
+ stream->src.x : plane_state->clip_rect.x;
+
+ clip.width = stream->src.x + stream->src.width <
+ plane_state->clip_rect.x + plane_state->clip_rect.width ?
+ stream->src.x + stream->src.width - clip.x :
+ plane_state->clip_rect.x + plane_state->clip_rect.width - clip.x ;
+
+ clip.y = stream->src.y > plane_state->clip_rect.y ?
+ stream->src.y : plane_state->clip_rect.y;
+
+ clip.height = stream->src.y + stream->src.height <
+ plane_state->clip_rect.y + plane_state->clip_rect.height ?
+ stream->src.y + stream->src.height - clip.y :
+ plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
+
+ /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
+ * num_pixels = clip.num_pix * scl_ratio
+ */
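+ /* Illustrative example (values assumed, not from this patch): a
+ * 3840-wide surf_src scanned out into a 1920-wide dst_rect gives
+ * scl_ratio = 2; clipping to clip = { .x = 960, .width = 960 } then
+ * yields viewport.x = 0 + (960 - 0) * 2 = 1920 and
+ * viewport.width = 960 * 2 = 1920.
+ */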
+ data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) *
+ surf_src.width / plane_state->dst_rect.width;
+ data->viewport.width = clip.width *
+ surf_src.width / plane_state->dst_rect.width;
+
+ data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) *
+ surf_src.height / plane_state->dst_rect.height;
+ data->viewport.height = clip.height *
+ surf_src.height / plane_state->dst_rect.height;
+
+ /* Round down, compensate in init */
+ data->viewport_c.x = data->viewport.x / vpc_div;
+ data->viewport_c.y = data->viewport.y / vpc_div;
+ data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
+ dal_fixed31_32_half : dal_fixed31_32_zero;
+ data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
+ dal_fixed31_32_half : dal_fixed31_32_zero;
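+ /* Illustrative example (values assumed): for a 4:2:0 format vpc_div = 2,
+ * so viewport.x = 7 rounds down to viewport_c.x = 3 with a half pixel
+ * carried in inits.h_c, while viewport.width = 7 rounds up to
+ * viewport_c.width = (7 + 1) / 2 = 4 below.
+ */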
+ /* Round up, assume original video size always even dimensions */
+ data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
+ data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+
+ /* Handle hsplit */
+ if (pri_split || sec_split) {
+ /* HMirror XOR Secondary_pipe XOR Rotation_180 */
+ bool right_view = (sec_split != plane_state->horizontal_mirror) !=
+ (plane_state->rotation == ROTATION_ANGLE_180);
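+ /* Illustrative example (values assumed): for the secondary pipe of a
+ * plane that is neither mirrored nor rotated 180 degrees,
+ * right_view = (true != false) != false = true, so that pipe scans
+ * out the right half of the viewport.
+ */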
+
+ if (plane_state->rotation == ROTATION_ANGLE_90
+ || plane_state->rotation == ROTATION_ANGLE_270)
+ /* Secondary_pipe XOR Rotation_270 */
+ right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
+
+ if (right_view) {
+ data->viewport.x += data->viewport.width / 2;
+ data->viewport_c.x += data->viewport_c.width / 2;
+ /* Ceil offset pipe */
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ data->viewport_c.width = (data->viewport_c.width + 1) / 2;
+ } else {
+ data->viewport.width /= 2;
+ data->viewport_c.width /= 2;
+ }
+ }
+
+ if (plane_state->rotation == ROTATION_ANGLE_90 ||
+ plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+ }
+}
+
+static void calculate_recout(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect surf_src = plane_state->src_rect;
+ struct rect surf_clip = plane_state->clip_rect;
+ int recout_full_x, recout_full_y;
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+ pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
+ if (stream->src.x < surf_clip.x)
+ pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
+ - stream->src.x) * stream->dst.width
+ / stream->src.width;
+
+ pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width *
+ stream->dst.width / stream->src.width;
+ if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x >
+ stream->dst.x + stream->dst.width)
+ pipe_ctx->plane_res.scl_data.recout.width =
+ stream->dst.x + stream->dst.width
+ - pipe_ctx->plane_res.scl_data.recout.x;
+
+ pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y;
+ if (stream->src.y < surf_clip.y)
+ pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y
+ - stream->src.y) * stream->dst.height
+ / stream->src.height;
+
+ pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height *
+ stream->dst.height / stream->src.height;
+ if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y >
+ stream->dst.y + stream->dst.height)
+ pipe_ctx->plane_res.scl_data.recout.height =
+ stream->dst.y + stream->dst.height
+ - pipe_ctx->plane_res.scl_data.recout.y;
+
+ /* Handle h & vsplit */
+ if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
+ pipe_ctx->plane_state) {
+ if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
+ pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
+ /* Floor primary pipe, ceil 2ndary pipe */
+ pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
+ } else {
+ pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
+ pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
+ }
+ } else if (pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
+ if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+ pipe_ctx->plane_res.scl_data.recout.height /= 2;
+ else
+ pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ }
+
+ /* Unclipped recout offset = stream dst offset
+ * + (surf dst offset - stream src offset) * 1 / stream scaling ratio
+ * - surf src offset * 1 / full scl ratio
+ */
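+ /* Illustrative example (values assumed): with no stream scaling
+ * (dst.width == src.width), dst_rect.x = 100, surf_src.x = 20 and a
+ * surface scaled from 400 to 200 pixels wide,
+ * recout_full_x = 0 + 100 - 20 * 200 / 400 = 90.
+ */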
+ recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ * stream->dst.width / stream->src.width -
+ surf_src.x * plane_state->dst_rect.width / surf_src.width
+ * stream->dst.width / stream->src.width;
+ recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ * stream->dst.height / stream->src.height -
+ surf_src.y * plane_state->dst_rect.height / surf_src.height
+ * stream->dst.height / stream->src.height;
+
+ recout_skip->width = pipe_ctx->plane_res.scl_data.recout.x - recout_full_x;
+ recout_skip->height = pipe_ctx->plane_res.scl_data.recout.y - recout_full_y;
+}
+
+static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct rect surf_src = plane_state->src_rect;
+ const int in_w = stream->src.width;
+ const int in_h = stream->src.height;
+ const int out_w = stream->dst.width;
+ const int out_h = stream->dst.height;
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
+ rect_swap_helper(&surf_src);
+
+ pipe_ctx->plane_res.scl_data.ratios.horz = dal_fixed31_32_from_fraction(
+ surf_src.width,
+ plane_state->dst_rect.width);
+ pipe_ctx->plane_res.scl_data.ratios.vert = dal_fixed31_32_from_fraction(
+ surf_src.height,
+ plane_state->dst_rect.height);
+
+ if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
+ pipe_ctx->plane_res.scl_data.ratios.horz.value *= 2;
+ else if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+ pipe_ctx->plane_res.scl_data.ratios.vert.value *= 2;
+
+ pipe_ctx->plane_res.scl_data.ratios.vert.value = div64_s64(
+ pipe_ctx->plane_res.scl_data.ratios.vert.value * in_h, out_h);
+ pipe_ctx->plane_res.scl_data.ratios.horz.value = div64_s64(
+ pipe_ctx->plane_res.scl_data.ratios.horz.value * in_w, out_w);
+
+ pipe_ctx->plane_res.scl_data.ratios.horz_c = pipe_ctx->plane_res.scl_data.ratios.horz;
+ pipe_ctx->plane_res.scl_data.ratios.vert_c = pipe_ctx->plane_res.scl_data.ratios.vert;
+
+ if (pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP8
+ || pipe_ctx->plane_res.scl_data.format == PIXEL_FORMAT_420BPP10) {
+ pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2;
+ pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2;
+ }
+}
+
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip)
+{
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect src = pipe_ctx->plane_state->src_rect;
+ int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&src);
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+ }
+
+ /*
+ * Init calculated according to formula:
+ * init = (scaling_ratio + number_of_taps + 1) / 2
+ * init_bot = init + scaling_ratio
+ * init_c = init + truncated_vp_c_offset(from calculate viewport)
+ */
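+ /* Illustrative example (values assumed): with a vertical scaling ratio
+ * of 1.5 and 4 taps, init = (1.5 + 4 + 1) / 2 = 3.25 and
+ * init_bot = 3.25 + 1.5 = 4.75, all in 31.32 fixed point.
+ */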
+ data->inits.h = dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.horz, data->taps.h_taps + 1), 2);
+
+ data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2));
+
+ data->inits.v = dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.vert, data->taps.v_taps + 1), 2);
+
+ data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_div_int(
+ dal_fixed31_32_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2));
+
+ /* Adjust for viewport end clip-off */
+ if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.h, data->ratios.horz));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
+ int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.v, data->ratios.vert));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
+ int vp_clip = (src.x + src.width) / vpc_div -
+ data->viewport_c.width - data->viewport_c.x;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.h_c, data->ratios.horz_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+ }
+ if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
+ int vp_clip = (src.y + src.height) / vpc_div -
+ data->viewport_c.height - data->viewport_c.y;
+ int int_part = dal_fixed31_32_floor(
+ dal_fixed31_32_sub(data->inits.v_c, data->ratios.vert_c));
+
+ int_part = int_part > 0 ? int_part : 0;
+ data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
+ }
+
+ /* Adjust for non-0 viewport offset */
+ if (data->viewport.x) {
+ int int_part;
+
+ data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int(
+ data->ratios.horz, recout_skip->width));
+ int_part = dal_fixed31_32_floor(data->inits.h) - data->viewport.x;
+ if (int_part < data->taps.h_taps) {
+ int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
+ (data->taps.h_taps - int_part) : data->viewport.x;
+ data->viewport.x -= int_adj;
+ data->viewport.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps) {
+ data->viewport.x += int_part - data->taps.h_taps;
+ data->viewport.width -= int_part - data->taps.h_taps;
+ int_part = data->taps.h_taps;
+ }
+ data->inits.h.value &= 0xffffffff;
+ data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part);
+ }
+
+ if (data->viewport_c.x) {
+ int int_part;
+
+ data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int(
+ data->ratios.horz_c, recout_skip->width));
+ int_part = dal_fixed31_32_floor(data->inits.h_c) - data->viewport_c.x;
+ if (int_part < data->taps.h_taps_c) {
+ int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
+ (data->taps.h_taps_c - int_part) : data->viewport_c.x;
+ data->viewport_c.x -= int_adj;
+ data->viewport_c.width += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.h_taps_c) {
+ data->viewport_c.x += int_part - data->taps.h_taps_c;
+ data->viewport_c.width -= int_part - data->taps.h_taps_c;
+ int_part = data->taps.h_taps_c;
+ }
+ data->inits.h_c.value &= 0xffffffff;
+ data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part);
+ }
+
+ if (data->viewport.y) {
+ int int_part;
+
+ data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int(
+ data->ratios.vert, recout_skip->height));
+ int_part = dal_fixed31_32_floor(data->inits.v) - data->viewport.y;
+ if (int_part < data->taps.v_taps) {
+ int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
+ (data->taps.v_taps - int_part) : data->viewport.y;
+ data->viewport.y -= int_adj;
+ data->viewport.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps) {
+ data->viewport.y += int_part - data->taps.v_taps;
+ data->viewport.height -= int_part - data->taps.v_taps;
+ int_part = data->taps.v_taps;
+ }
+ data->inits.v.value &= 0xffffffff;
+ data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part);
+ }
+
+ if (data->viewport_c.y) {
+ int int_part;
+
+ data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int(
+ data->ratios.vert_c, recout_skip->height));
+ int_part = dal_fixed31_32_floor(data->inits.v_c) - data->viewport_c.y;
+ if (int_part < data->taps.v_taps_c) {
+ int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
+ (data->taps.v_taps_c - int_part) : data->viewport_c.y;
+ data->viewport_c.y -= int_adj;
+ data->viewport_c.height += int_adj;
+ int_part += int_adj;
+ } else if (int_part > data->taps.v_taps_c) {
+ data->viewport_c.y += int_part - data->taps.v_taps_c;
+ data->viewport_c.height -= int_part - data->taps.v_taps_c;
+ int_part = data->taps.v_taps_c;
+ }
+ data->inits.v_c.value &= 0xffffffff;
+ data->inits.v_c = dal_fixed31_32_add_int(data->inits.v_c, int_part);
+ }
+
+ /* Interlaced inits based on final vert inits */
+ data->inits.v_bot = dal_fixed31_32_add(data->inits.v, data->ratios.vert);
+ data->inits.v_c_bot = dal_fixed31_32_add(data->inits.v_c, data->ratios.vert_c);
+
+ if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+ pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+ rect_swap_helper(&data->viewport_c);
+ rect_swap_helper(&data->viewport);
+ }
+}
+
+bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct view recout_skip = { 0 };
+ bool res = false;
+
+ /* Important: scaling ratio calculation requires the pixel format;
+ * lb depth calculation requires recout; and taps require scaling ratios.
+ * Inits require viewport, taps, ratios and recout of the split pipe.
+ */
+ pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
+ pipe_ctx->plane_state->format);
+
+ calculate_scaling_ratios(pipe_ctx);
+
+ calculate_viewport(pipe_ctx);
+
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
+ return false;
+
+ calculate_recout(pipe_ctx, &recout_skip);
+
+ /**
+ * Setting line buffer pixel depth to 24bpp yields banding
+ * on certain displays, such as the Sharp 4k
+ */
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+
+ pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
+ pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
+
+ pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
+ pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+
+ /* Taps calculations */
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+ if (!res) {
+ /* Try 24 bpp linebuffer */
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
+
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm,
+ &pipe_ctx->plane_res.scl_data,
+ &plane_state->scaling_quality);
+
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp,
+ &pipe_ctx->plane_res.scl_data,
+ &plane_state->scaling_quality);
+ }
+
+ if (res)
+ /* May need to re-check lb size after this in some obscure scenario */
+ calculate_inits_and_adj_vp(pipe_ctx, &recout_skip);
+
+ dm_logger_write(pipe_ctx->stream->ctx->logger, LOG_SCALER,
+ "%s: Viewport:\nheight:%d width:%d x:%d "
+ "y:%d\n dst_rect:\nheight:%d width:%d x:%d "
+ "y:%d\n",
+ __func__,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ plane_state->dst_rect.height,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y);
+
+ return res;
+}
+
+enum dc_status resource_build_scaling_params_for_context(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state != NULL &&
+ context->res_ctx.pipe_ctx[i].stream != NULL)
+ if (!resource_build_scaling_params(&context->res_ctx.pipe_ctx[i]))
+ return DC_FAIL_SCALING;
+ }
+
+ return DC_OK;
+}
+
+struct pipe_ctx *find_idle_secondary_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+ struct pipe_ctx *secondary_pipe = NULL;
+
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+ return secondary_pipe;
+}
+
+struct pipe_ctx *resource_get_head_pipe_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream)
+{
+ int i;
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream == stream &&
+ !res_ctx->pipe_ctx[i].top_pipe) {
+ return &res_ctx->pipe_ctx[i];
+ }
+ }
+ return NULL;
+}
+
+static struct pipe_ctx *resource_get_tail_pipe_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *head_pipe, *tail_pipe;
+ head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+
+ if (!head_pipe)
+ return NULL;
+
+ tail_pipe = head_pipe->bottom_pipe;
+
+ while (tail_pipe) {
+ head_pipe = tail_pipe;
+ tail_pipe = tail_pipe->bottom_pipe;
+ }
+
+ return head_pipe;
+}
+
+/*
+ * A free_pipe for a stream is defined here as a pipe
+ * that has no surface attached yet
+ */
+static struct pipe_ctx *acquire_free_pipe_for_stream(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct resource_context *res_ctx = &context->res_ctx;
+
+ struct pipe_ctx *head_pipe = NULL;
+
+ /* Find head pipe, which has the back end set up*/
+
+ head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+
+ if (!head_pipe) {
+ ASSERT(0);
+ return NULL;
+ }
+
+ if (!head_pipe->plane_state)
+ return head_pipe;
+
+ /* Re-use pipe already acquired for this stream if available*/
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == stream &&
+ !res_ctx->pipe_ctx[i].plane_state) {
+ return &res_ctx->pipe_ctx[i];
+ }
+ }
+
+ /*
+ * At this point we have no reusable pipe for this stream, so we need
+ * to acquire an idle one to satisfy the request
+ */
+
+ if (!pool->funcs->acquire_idle_pipe_for_layer)
+ return NULL;
+
+ return pool->funcs->acquire_idle_pipe_for_layer(context, pool, stream);
+
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+static int acquire_first_split_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state) {
+ pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
+ if (pipe_ctx->bottom_pipe)
+ pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
+
+ memset(pipe_ctx, 0, sizeof(*pipe_ctx));
+ pipe_ctx->stream_res.tg = pool->timing_generators[i];
+ pipe_ctx->plane_res.hubp = pool->hubps[i];
+ pipe_ctx->plane_res.ipp = pool->ipps[i];
+ pipe_ctx->plane_res.dpp = pool->dpps[i];
+ pipe_ctx->stream_res.opp = pool->opps[i];
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->stream = stream;
+ return i;
+ }
+ }
+ return -1;
+}
+#endif
+
+bool dc_add_plane_to_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context)
+{
+ int i;
+ struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *head_pipe, *tail_pipe, *free_pipe;
+ struct dc_stream_status *stream_status = NULL;
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to attach surface!\n");
+ return false;
+ }
+
+ if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+ plane_state, MAX_SURFACE_NUM);
+ return false;
+ }
+
+ head_pipe = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!head_pipe) {
+ dm_error("Head pipe not found for stream_state %p !\n", stream);
+ return false;
+ }
+
+ free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (!free_pipe) {
+ int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+ if (pipe_idx >= 0)
+ free_pipe = &context->res_ctx.pipe_ctx[pipe_idx];
+ }
+#endif
+ if (!free_pipe)
+ return false;
+
+ /* retain new surfaces */
+ dc_plane_state_retain(plane_state);
+ free_pipe->plane_state = plane_state;
+
+ if (head_pipe != free_pipe) {
+
+ tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
+ ASSERT(tail_pipe);
+
+ free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
+ free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
+ free_pipe->stream_res.stream_enc = tail_pipe->stream_res.stream_enc;
+ free_pipe->stream_res.audio = tail_pipe->stream_res.audio;
+ free_pipe->clock_source = tail_pipe->clock_source;
+ free_pipe->top_pipe = tail_pipe;
+ tail_pipe->bottom_pipe = free_pipe;
+ }
+
+ /* assign new surfaces*/
+ stream_status->plane_states[stream_status->plane_count] = plane_state;
+
+ stream_status->plane_count++;
+
+ return true;
+}
+
+bool dc_remove_plane_from_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context)
+{
+ int i;
+ struct dc_stream_status *stream_status = NULL;
+ struct resource_pool *pool = dc->res_pool;
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to remove plane.\n");
+ return false;
+ }
+
+ /* release pipe for plane*/
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *pipe_ctx;
+
+ if (context->res_ctx.pipe_ctx[i].plane_state == plane_state) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->top_pipe)
+ pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
+
+ /* The second condition avoids setting the bottom (tail) pipe's
+ * top_pipe to NULL, which would make it look like a head pipe in
+ * subsequent deletes
+ */
+ if (pipe_ctx->bottom_pipe && pipe_ctx->top_pipe)
+ pipe_ctx->bottom_pipe->top_pipe = pipe_ctx->top_pipe;
+
+ /*
+ * For a head pipe, detach the surface from the pipe; for a tail
+ * pipe, just zero it out
+ */
+ if (!pipe_ctx->top_pipe) {
+ pipe_ctx->plane_state = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ } else {
+ memset(pipe_ctx, 0, sizeof(*pipe_ctx));
+ }
+ }
+ }
+
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i] == plane_state) {
+
+ dc_plane_state_release(stream_status->plane_states[i]);
+ break;
+ }
+ }
+
+ if (i == stream_status->plane_count) {
+ dm_error("Existing plane_state not found; failed to detach it!\n");
+ return false;
+ }
+
+ stream_status->plane_count--;
+
+ /* Start at the plane we've just released and shift the remaining planes down by one index to trim the array */
+ for (; i < stream_status->plane_count; i++)
+ stream_status->plane_states[i] = stream_status->plane_states[i + 1];
+
+ stream_status->plane_states[stream_status->plane_count] = NULL;
+
+ return true;
+}
+
+bool dc_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ int i, old_plane_count;
+ struct dc_stream_status *stream_status = NULL;
+ struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ stream_status = &context->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream %p not found!\n", stream);
+ return false;
+ }
+
+ old_plane_count = stream_status->plane_count;
+
+ for (i = 0; i < old_plane_count; i++)
+ del_planes[i] = stream_status->plane_states[i];
+
+ for (i = 0; i < old_plane_count; i++)
+ if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context))
+ return false;
+
+ return true;
+}
+
+static bool add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *context)
+{
+ int i, j;
+
+ for (i = 0; i < set_count; i++)
+ if (set[i].stream == stream)
+ break;
+
+ if (i == set_count) {
+ dm_error("Stream %p not found in set!\n", stream);
+ return false;
+ }
+
+ for (j = 0; j < set[i].plane_count; j++)
+ if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context))
+ return false;
+
+ return true;
+}
+
+bool dc_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *context)
+{
+ struct dc_validation_set set;
+ int i;
+
+ set.stream = stream;
+ set.plane_count = plane_count;
+
+ for (i = 0; i < plane_count; i++)
+ set.plane_states[i] = plane_states[i];
+
+ return add_all_planes_for_stream(dc, stream, &set, 1, context);
+}
+
+static bool is_timing_changed(struct dc_stream_state *cur_stream,
+ struct dc_stream_state *new_stream)
+{
+ if (cur_stream == NULL)
+ return true;
+
+ /* If the sink pointer changed, this is a hotplug and we should do a
+ * full hw setup.
+ */
+ if (cur_stream->sink != new_stream->sink)
+ return true;
+
+ /* If output color space is changed, need to reprogram info frames */
+ if (cur_stream->output_color_space != new_stream->output_color_space)
+ return true;
+
+ return memcmp(
+ &cur_stream->timing,
+ &new_stream->timing,
+ sizeof(struct dc_crtc_timing)) != 0;
+}
+
+static bool are_stream_backends_same(
+ struct dc_stream_state *stream_a, struct dc_stream_state *stream_b)
+{
+ if (stream_a == stream_b)
+ return true;
+
+ if (stream_a == NULL || stream_b == NULL)
+ return false;
+
+ if (is_timing_changed(stream_a, stream_b))
+ return false;
+
+ return true;
+}
+
+bool dc_is_stream_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream)
+{
+
+ if (!are_stream_backends_same(old_stream, stream))
+ return false;
+
+ return true;
+}
+
+bool dc_is_stream_scaling_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream)
+{
+ if (old_stream == stream)
+ return true;
+
+ if (old_stream == NULL || stream == NULL)
+ return false;
+
+ if (memcmp(&old_stream->src,
+ &stream->src,
+ sizeof(struct rect)) != 0)
+ return false;
+
+ if (memcmp(&old_stream->dst,
+ &stream->dst,
+ sizeof(struct rect)) != 0)
+ return false;
+
+ return true;
+}
+
+/* Maximum TMDS single link pixel clock 165MHz */
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
+
+static void update_stream_engine_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct stream_encoder *stream_enc,
+ bool acquired)
+{
+ int i;
+
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (pool->stream_enc[i] == stream_enc)
+ res_ctx->is_stream_enc_acquired[i] = acquired;
+ }
+}
+
+/* TODO: release audio object */
+void update_audio_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct audio *audio,
+ bool acquired)
+{
+ int i;
+ for (i = 0; i < pool->audio_count; i++) {
+ if (pool->audios[i] == audio)
+ res_ctx->is_audio_acquired[i] = acquired;
+ }
+}
+
+static int acquire_first_free_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ if (!res_ctx->pipe_ctx[i].stream) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ pipe_ctx->stream_res.tg = pool->timing_generators[i];
+ pipe_ctx->plane_res.mi = pool->mis[i];
+ pipe_ctx->plane_res.hubp = pool->hubps[i];
+ pipe_ctx->plane_res.ipp = pool->ipps[i];
+ pipe_ctx->plane_res.xfm = pool->transforms[i];
+ pipe_ctx->plane_res.dpp = pool->dpps[i];
+ pipe_ctx->stream_res.opp = pool->opps[i];
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->stream = stream;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static struct stream_encoder *find_first_free_match_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+ int j = -1;
+ struct dc_link *link = stream->sink->link;
+
+ for (i = 0; i < pool->stream_enc_count; i++) {
+ if (!res_ctx->is_stream_enc_acquired[i] &&
+ pool->stream_enc[i]) {
+ /* Store first available for MST second display
+ * in daisy chain use case */
+ j = i;
+ if (pool->stream_enc[i]->id ==
+ link->link_enc->preferred_engine)
+ return pool->stream_enc[i];
+ }
+ }
+
+ /*
+ * The below can happen when the stream encoder is already acquired:
+ * 1) for the second MST display in a chain, so the preferred engine
+ * is already acquired;
+ * 2) for another link whose preferred engine is already acquired by
+ * some MST configuration.
+ *
+ * If the signal is of DP type and the preferred engine is not found,
+ * return the last available one.
+ *
+ * TODO - This is just a patch up and a generic solution is
+ * required for non DP connectors.
+ */
+
+ if (j >= 0 && dc_is_dp_signal(stream->signal))
+ return pool->stream_enc[j];
+
+ return NULL;
+}
+
+static struct audio *find_first_free_audio(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ enum engine_id id)
+{
+ int i;
+ for (i = 0; i < pool->audio_count; i++) {
+ if (!res_ctx->is_audio_acquired[i] && res_ctx->is_stream_enc_acquired[i]) {
+ /* we have enough audio endpoints, find the matching instance */
+ if (id != i)
+ continue;
+
+ return pool->audios[i];
+ }
+ }
+ /* no matching instance found, first come first served */
+ for (i = 0; i < pool->audio_count; i++) {
+ if (!res_ctx->is_audio_acquired[i]) {
+ return pool->audios[i];
+ }
+ }
+ return NULL;
+}
+
+bool resource_is_stream_unchanged(
+ struct dc_state *old_context, struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < old_context->stream_count; i++) {
+ struct dc_stream_state *old_stream = old_context->streams[i];
+
+ if (are_stream_backends_same(old_stream, stream))
+ return true;
+ }
+
+ return false;
+}
+
+enum dc_status dc_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ enum dc_status res;
+
+ if (new_ctx->stream_count >= dc->res_pool->pipe_count) {
+ DC_ERROR("Max streams reached, can add stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ new_ctx->streams[new_ctx->stream_count] = stream;
+ dc_stream_retain(stream);
+ new_ctx->stream_count++;
+
+ res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
+ if (res != DC_OK)
+ DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res);
+
+ return res;
+}
+
+enum dc_status dc_remove_stream_from_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_context *dc_ctx = dc->ctx;
+ struct pipe_ctx *del_pipe = NULL;
+
+ /* Release primary pipe */
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
+ !new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+ del_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+
+ ASSERT(del_pipe->stream_res.stream_enc);
+ update_stream_engine_usage(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->stream_res.stream_enc,
+ false);
+
+ if (del_pipe->stream_res.audio)
+ update_audio_usage(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->stream_res.audio,
+ false);
+
+ resource_unreference_clock_source(&new_ctx->res_ctx,
+ dc->res_pool,
+ del_pipe->clock_source);
+
+ memset(del_pipe, 0, sizeof(*del_pipe));
+
+ break;
+ }
+ }
+
+ if (!del_pipe) {
+ DC_ERROR("Pipe not found for stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++)
+ if (new_ctx->streams[i] == stream)
+ break;
+
+ if (new_ctx->streams[i] != stream) {
+ DC_ERROR("Context doesn't have stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ dc_stream_release(new_ctx->streams[i]);
+ new_ctx->stream_count--;
+
+ /* Trim back arrays */
+ for (; i < new_ctx->stream_count; i++) {
+ new_ctx->streams[i] = new_ctx->streams[i + 1];
+ new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
+ }
+
+ new_ctx->streams[new_ctx->stream_count] = NULL;
+ memset(
+ &new_ctx->stream_status[new_ctx->stream_count],
+ 0,
+ sizeof(new_ctx->stream_status[0]));
+
+ return DC_OK;
+}
+
+static void copy_pipe_ctx(
+ const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx)
+{
+ struct dc_plane_state *plane_state = to_pipe_ctx->plane_state;
+ struct dc_stream_state *stream = to_pipe_ctx->stream;
+
+ *to_pipe_ctx = *from_pipe_ctx;
+ to_pipe_ctx->stream = stream;
+ if (plane_state != NULL)
+ to_pipe_ctx->plane_state = plane_state;
+}
+
+static struct dc_stream_state *find_pll_sharable_stream(
+ struct dc_stream_state *stream_needs_pll,
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream_has_pll = context->streams[i];
+
+ /* We are looking for a non-DP, non-virtual stream */
+ if (resource_are_streams_timing_synchronizable(
+ stream_needs_pll, stream_has_pll)
+ && !dc_is_dp_signal(stream_has_pll->signal)
+ && stream_has_pll->sink->link->connector_signal
+ != SIGNAL_TYPE_VIRTUAL)
+ return stream_has_pll;
+
+ }
+
+ return NULL;
+}
+
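+ /* Normalize the pixel clock to an effective 24 bpp rate: halve it for
+ * 4:2:0 and scale it by bpp / 24 for deep color (except 4:2:2). For
+ * example (illustrative values), a 148500 kHz clock at 10 bpc becomes
+ * 148500 * 30 / 24 = 185625 kHz.
+ */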
+static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
+{
+ uint32_t pix_clk = timing->pix_clk_khz;
+ uint32_t normalized_pix_clk = pix_clk;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pix_clk /= 2;
+ if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ normalized_pix_clk = pix_clk;
+ break;
+ case COLOR_DEPTH_101010:
+ normalized_pix_clk = (pix_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_pix_clk = (pix_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_pix_clk = (pix_clk * 48) / 24;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ return normalized_pix_clk;
+}
+
+static void calculate_phy_pix_clks(struct dc_stream_state *stream)
+{
+ /* update actual pixel clock on all streams */
+ if (dc_is_hdmi_signal(stream->signal))
+ stream->phy_pix_clk = get_norm_pix_clk(
+ &stream->timing);
+ else
+ stream->phy_pix_clk =
+ stream->timing.pix_clk_khz;
+}
+
+enum dc_status resource_map_pool_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ const struct resource_pool *pool = dc->res_pool;
+ int i;
+ struct dc_context *dc_ctx = dc->ctx;
+ struct pipe_ctx *pipe_ctx = NULL;
+ int pipe_idx = -1;
+
+ /* TODO Check if this is needed */
+ /*if (!resource_is_stream_unchanged(old_context, stream)) {
+ if (stream != NULL && old_context->streams[i] != NULL) {
+ stream->bit_depth_params =
+ old_context->streams[i]->bit_depth_params;
+ stream->clamping = old_context->streams[i]->clamping;
+ continue;
+ }
+ }
+ */
+
+ /* acquire new resources */
+ pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ if (pipe_idx < 0)
+ pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
+#endif
+
+ if (pipe_idx < 0)
+ return DC_NO_CONTROLLER_RESOURCE;
+
+ pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
+ pipe_ctx->stream_res.stream_enc =
+ find_first_free_match_stream_enc_for_link(
+ &context->res_ctx, pool, stream);
+
+ if (!pipe_ctx->stream_res.stream_enc)
+ return DC_NO_STREAM_ENG_RESOURCE;
+
+ update_stream_engine_usage(
+ &context->res_ctx, pool,
+ pipe_ctx->stream_res.stream_enc,
+ true);
+
+ /* TODO: Add checks for ASIC audio support and EDID audio */
+ if (!stream->sink->converter_disable_audio &&
+ dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
+ stream->audio_info.mode_count) {
+ pipe_ctx->stream_res.audio = find_first_free_audio(
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+
+ /*
+ * Audio is assigned on a first come, first served basis.
+ * There are ASICs which have fewer audio resources
+ * than pipes.
+ */
+ if (pipe_ctx->stream_res.audio)
+ update_audio_usage(&context->res_ctx, pool,
+ pipe_ctx->stream_res.audio, true);
+ }
+
+ for (i = 0; i < context->stream_count; i++)
+ if (context->streams[i] == stream) {
+ context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
+ context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+ return DC_OK;
+ }
+
+ DC_ERROR("Stream %p not found in new ctx!\n", stream);
+ return DC_ERROR_UNEXPECTED;
+}
+
+/* first stream in the context is used to populate the rest */
+void validate_guaranteed_copy_streams(
+ struct dc_state *context,
+ int max_streams)
+{
+ int i;
+
+ for (i = 1; i < max_streams; i++) {
+ context->streams[i] = context->streams[0];
+
+ copy_pipe_ctx(&context->res_ctx.pipe_ctx[0],
+ &context->res_ctx.pipe_ctx[i]);
+ context->res_ctx.pipe_ctx[i].stream =
+ context->res_ctx.pipe_ctx[0].stream;
+
+ dc_stream_retain(context->streams[i]);
+ context->stream_count++;
+ }
+}
+
+void dc_resource_state_copy_construct_current(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+{
+ dc_resource_state_copy_construct(dc->current_state, dst_ctx);
+}
+
+void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx)
+{
+ dst_ctx->dis_clk = dc->res_pool->display_clock;
+}
+
+enum dc_status dc_validate_global_state(
+ struct dc *dc,
+ struct dc_state *new_ctx)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ int i, j;
+
+ if (!new_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc->res_pool->funcs->validate_global) {
+ result = dc->res_pool->funcs->validate_global(dc, new_ctx);
+ if (result != DC_OK)
+ return result;
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++) {
+ struct dc_stream_state *stream = new_ctx->streams[i];
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ /* Switch to dp clock source only if there is
+ * no non dp stream that shares the same timing
+ * with the dp stream.
+ */
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+ !find_pll_sharable_stream(stream, new_ctx)) {
+
+ resource_unreference_clock_source(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ pipe_ctx->clock_source);
+
+ pipe_ctx->clock_source = dc->res_pool->dp_clock_source;
+ resource_reference_clock_source(
+ &new_ctx->res_ctx,
+ dc->res_pool,
+ pipe_ctx->clock_source);
+ }
+ }
+ }
+
+ result = resource_build_scaling_params_for_context(dc, new_ctx);
+
+ if (result == DC_OK)
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static void patch_gamut_packet_checksum(
+ struct encoder_info_packet *gamut_packet)
+{
+ /* For gamut we recalc checksum */
+ if (gamut_packet->valid) {
+ uint8_t chk_sum = 0;
+ uint8_t *ptr;
+ uint8_t i;
+
+ /*start of the Gamut data. */
+ ptr = &gamut_packet->sb[3];
+
+ for (i = 0; i <= gamut_packet->sb[1]; i++)
+ chk_sum += ptr[i];
+
+ gamut_packet->sb[2] = (uint8_t) (0x100 - chk_sum);
+ }
+}
+
+static void set_avi_info_frame(
+ struct encoder_info_packet *info_packet,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+ struct info_frame info_frame = { {0} };
+ uint32_t pixel_encoding = 0;
+ enum scanning_type scan_type = SCANNING_TYPE_NODATA;
+ enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
+ bool itc = false;
+ uint8_t itc_value = 0;
+ uint8_t cn0_cn1 = 0;
+ unsigned int cn0_cn1_value = 0;
+ uint8_t *check_sum = NULL;
+ uint8_t byte_index = 0;
+ union hdmi_info_packet *hdmi_info = &info_frame.avi_info_packet.info_packet_hdmi;
+ union display_content_support support = {0};
+ unsigned int vic = pipe_ctx->stream->timing.vic;
+ enum dc_timing_3d_format format;
+
+ color_space = pipe_ctx->stream->output_color_space;
+ if (color_space == COLOR_SPACE_UNKNOWN)
+ color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
+ COLOR_SPACE_SRGB : COLOR_SPACE_YCBCR709;
+
+ /* Initialize header */
+ hdmi_info->bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
+ /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
+ * not be used in HDMI 2.0 (Section 10.1) */
+ hdmi_info->bits.header.version = 2;
+ hdmi_info->bits.header.length = HDMI_AVI_INFOFRAME_SIZE;
+
+ /*
+ * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
+ * according to HDMI 2.0 spec (Section 10.1)
+ */
+
+ switch (stream->timing.pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ pixel_encoding = 1;
+ break;
+
+ case PIXEL_ENCODING_YCBCR444:
+ pixel_encoding = 2;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ pixel_encoding = 3;
+ break;
+
+ case PIXEL_ENCODING_RGB:
+ default:
+ pixel_encoding = 0;
+ }
+
+ /* Y0_Y1_Y2 : The pixel encoding */
+ /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
+ hdmi_info->bits.Y0_Y1_Y2 = pixel_encoding;
+
+ /* A0 = 1 Active Format Information valid */
+ hdmi_info->bits.A0 = ACTIVE_FORMAT_VALID;
+
+ /* B0, B1 = 3; Bar info data is valid */
+ hdmi_info->bits.B0_B1 = BAR_INFO_BOTH_VALID;
+
+ hdmi_info->bits.SC0_SC1 = PICTURE_SCALING_UNIFORM;
+
+ /* S0, S1 : Underscan / Overscan */
+ /* TODO: un-hardcode scan type */
+ scan_type = SCANNING_TYPE_UNDERSCAN;
+ hdmi_info->bits.S0_S1 = scan_type;
+
+ /* C0, C1 : Colorimetry */
+ if (color_space == COLOR_SPACE_YCBCR709 ||
+ color_space == COLOR_SPACE_YCBCR709_LIMITED)
+ hdmi_info->bits.C0_C1 = COLORIMETRY_ITU709;
+ else if (color_space == COLOR_SPACE_YCBCR601 ||
+ color_space == COLOR_SPACE_YCBCR601_LIMITED)
+ hdmi_info->bits.C0_C1 = COLORIMETRY_ITU601;
+ else {
+ hdmi_info->bits.C0_C1 = COLORIMETRY_NO_DATA;
+ }
+ if (color_space == COLOR_SPACE_2020_RGB_FULLRANGE ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE ||
+ color_space == COLOR_SPACE_2020_YCBCR) {
+ hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR;
+ hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
+ } else if (color_space == COLOR_SPACE_ADOBERGB) {
+ hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB;
+ hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED;
+ }
+
+ /* TODO: un-hardcode aspect ratio */
+ aspect = stream->timing.aspect_ratio;
+
+ switch (aspect) {
+ case ASPECT_RATIO_4_3:
+ case ASPECT_RATIO_16_9:
+ hdmi_info->bits.M0_M1 = aspect;
+ break;
+
+ case ASPECT_RATIO_NO_DATA:
+ case ASPECT_RATIO_64_27:
+ case ASPECT_RATIO_256_135:
+ default:
+ hdmi_info->bits.M0_M1 = 0;
+ }
+
+ /* Active Format Aspect ratio - same as Picture Aspect Ratio. */
+ hdmi_info->bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
+
+ /* TODO: un-hardcode cn0_cn1 and itc */
+
+ cn0_cn1 = 0;
+ cn0_cn1_value = 0;
+
+ itc = true;
+ itc_value = 1;
+
+ support = stream->sink->edid_caps.content_support;
+
+ if (itc) {
+ if (!support.bits.valid_content_type) {
+ cn0_cn1_value = 0;
+ } else {
+ if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GRAPHICS) {
+ if (support.bits.graphics_content == 1) {
+ cn0_cn1_value = 0;
+ }
+ } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_PHOTO) {
+ if (support.bits.photo_content == 1) {
+ cn0_cn1_value = 1;
+ } else {
+ cn0_cn1_value = 0;
+ itc_value = 0;
+ }
+ } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_CINEMA) {
+ if (support.bits.cinema_content == 1) {
+ cn0_cn1_value = 2;
+ } else {
+ cn0_cn1_value = 0;
+ itc_value = 0;
+ }
+ } else if (cn0_cn1 == DISPLAY_CONTENT_TYPE_GAME) {
+ if (support.bits.game_content == 1) {
+ cn0_cn1_value = 3;
+ } else {
+ cn0_cn1_value = 0;
+ itc_value = 0;
+ }
+ }
+ }
+ hdmi_info->bits.CN0_CN1 = cn0_cn1_value;
+ hdmi_info->bits.ITC = itc_value;
+ }
+
+ /* TODO: We should handle YCC quantization,
+ * but we do not have matrix calculation */
+ if (stream->sink->edid_caps.qs_bit == 1 &&
+ stream->sink->edid_caps.qy_bit == 1) {
+ if (color_space == COLOR_SPACE_SRGB ||
+ color_space == COLOR_SPACE_2020_RGB_FULLRANGE) {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE;
+ } else if (color_space == COLOR_SPACE_SRGB_LIMITED ||
+ color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ } else {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ }
+ } else {
+ hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE;
+ hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;
+ }
+
+ /* VIC */
+ format = stream->timing.timing_3d_format;
+ /* TODO: add 3D stereo support */
+ if (format != TIMING_3D_FORMAT_NONE) {
+ /* Per the HDMI spec, the HDMI VIC needs to be converted to a CEA VIC when 3D is enabled */
+ switch (pipe_ctx->stream->timing.hdmi_vic) {
+ case 1:
+ vic = 95;
+ break;
+ case 2:
+ vic = 94;
+ break;
+ case 3:
+ vic = 93;
+ break;
+ case 4:
+ vic = 98;
+ break;
+ default:
+ break;
+ }
+ }
+ hdmi_info->bits.VIC0_VIC7 = vic;
+
+ /* pixel repetition
+ * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
+ * repetition start from 1 */
+ hdmi_info->bits.PR0_PR3 = 0;
+
+ /* Bar Info
+ * barTop: Line Number of End of Top Bar.
+ * barBottom: Line Number of Start of Bottom Bar.
+ * barLeft: Pixel Number of End of Left Bar.
+ * barRight: Pixel Number of Start of Right Bar. */
+ hdmi_info->bits.bar_top = stream->timing.v_border_top;
+ hdmi_info->bits.bar_bottom = (stream->timing.v_total
+ - stream->timing.v_border_bottom + 1);
+ hdmi_info->bits.bar_left = stream->timing.h_border_left;
+ hdmi_info->bits.bar_right = (stream->timing.h_total
+ - stream->timing.h_border_right + 1);
+
+ /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
+ check_sum = &info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
+
+ *check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2;
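+ /* The + 2 appears to account for the InfoFrame version byte (HB1),
+ * which is set to 2 above; the payload bytes are summed next.
+ */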
+
+ for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++)
+ *check_sum += hdmi_info->packet_raw_data.sb[byte_index];
+
+ /* one byte complement */
+ *check_sum = (uint8_t) (0x100 - *check_sum);
+
+ /* Store in hw_path_mode */
+ info_packet->hb0 = hdmi_info->packet_raw_data.hb0;
+ info_packet->hb1 = hdmi_info->packet_raw_data.hb1;
+ info_packet->hb2 = hdmi_info->packet_raw_data.hb2;
+
+ for (byte_index = 0; byte_index < sizeof(info_frame.avi_info_packet.
+ info_packet_hdmi.packet_raw_data.sb); byte_index++)
+ info_packet->sb[byte_index] = info_frame.avi_info_packet.
+ info_packet_hdmi.packet_raw_data.sb[byte_index];
+
+ info_packet->valid = true;
+}
+
+static void set_vendor_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ uint32_t length = 0;
+ bool hdmi_vic_mode = false;
+ uint8_t checksum = 0;
+ uint32_t i = 0;
+ enum dc_timing_3d_format format;
+ // Can be different depending on packet content /*todo*/
+ // unsigned int length = pPathMode->dolbyVision ? 24 : 5;
+
+ info_packet->valid = false;
+
+ format = stream->timing.timing_3d_format;
+ if (stream->view_format == VIEW_3D_FORMAT_NONE)
+ format = TIMING_3D_FORMAT_NONE;
+
+ /* Can be different depending on packet content */
+ length = 5;
+
+ if (stream->timing.hdmi_vic != 0
+ && stream->timing.h_total >= 3840
+ && stream->timing.v_total >= 2160)
+ hdmi_vic_mode = true;
+
+ /* According to HDMI 1.4a CTS, VSIF should be sent
+ * for both 3D stereo and HDMI VIC modes.
+ * For all other modes, there is no VSIF sent. */
+
+ if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+ return;
+
+ /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
+ info_packet->sb[1] = 0x03;
+ info_packet->sb[2] = 0x0C;
+ info_packet->sb[3] = 0x00;
+
+ /*PB4: 5 lower bits = 0 (reserved). 3 higher bits = HDMI_Video_Format.
+ * The value for HDMI_Video_Format are:
+ * 0x0 (0b000) - No additional HDMI video format is presented in this
+ * packet
+ * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
+ * parameter follows
+ * 0x2 (0b010) - 3D format indication present. 3D_Structure and
+ * potentially 3D_Ext_Data follows
+ * 0x3..0x7 (0b011..0b111) - reserved for future use */
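+ /* For example (illustrative), a 3D format indication means
+ * HDMI_Video_Format = 0x2, so PB4 = 0x2 << 5 = 0x40, as programmed
+ * below.
+ */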
+ if (format != TIMING_3D_FORMAT_NONE)
+ info_packet->sb[4] = (2 << 5);
+ else if (hdmi_vic_mode)
+ info_packet->sb[4] = (1 << 5);
+
+ /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
+ * 4 lower bits = 0 (reserved). 4 higher bits = 3D_Structure.
+ * The value for 3D_Structure are:
+ * 0x0 - Frame Packing
+ * 0x1 - Field Alternative
+ * 0x2 - Line Alternative
+ * 0x3 - Side-by-Side (full)
+ * 0x4 - L + depth
+ * 0x5 - L + depth + graphics + graphics-depth
+ * 0x6 - Top-and-Bottom
+ * 0x7 - Reserved for future use
+ * 0x8 - Side-by-Side (Half)
+ * 0x9..0xE - Reserved for future use
+ * 0xF - Not used */
+ switch (format) {
+ case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+ case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+ info_packet->sb[5] = (0x0 << 4);
+ break;
+
+ case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+ case TIMING_3D_FORMAT_SBS_SW_PACKED:
+ info_packet->sb[5] = (0x8 << 4);
+ length = 6;
+ break;
+
+ case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+ case TIMING_3D_FORMAT_TB_SW_PACKED:
+ info_packet->sb[5] = (0x6 << 4);
+ break;
+
+ default:
+ break;
+ }
+
+ /* PB5: If PB4 is set to 0x1 (extended resolution format)
+ * fill PB5 with the correct HDMI VIC code */
+ if (hdmi_vic_mode)
+ info_packet->sb[5] = stream->timing.hdmi_vic;
+
+ /* Header */
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
+ info_packet->hb1 = 0x01; /* Version */
+
+ /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
+ info_packet->hb2 = (uint8_t) (length);
+
+ /* Calculate checksum */
+ checksum = 0;
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= length; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
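As the comments above describe, PB4 carries HDMI_Video_Format in its three most-significant bits and PB5 carries either the 3D_Structure nibble or the HDMI VIC, depending on which mode PB4 announces. A standalone sketch of that packing, with a hypothetical helper name (illustration only, not part of the patch):

#include <stdint.h>
#include <stdbool.h>

/* Illustration: pack VSIF PB4/PB5 for 3D or extended-resolution signalling. */
static void pack_vsif_pb4_pb5(bool is_3d, uint8_t structure_3d,
			      bool is_hdmi_vic, uint8_t hdmi_vic,
			      uint8_t *pb4, uint8_t *pb5)
{
	*pb4 = 0;
	*pb5 = 0;

	if (is_3d) {
		*pb4 = 2 << 5;            /* HDMI_Video_Format = 0b010 */
		*pb5 = structure_3d << 4; /* 3D_Structure in the high nibble */
	} else if (is_hdmi_vic) {
		*pb4 = 1 << 5;            /* HDMI_Video_Format = 0b001 */
		*pb5 = hdmi_vic;          /* extended resolution VIC code */
	}
}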
+
+static void set_spd_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ /* SPD info packet for FreeSync */
+
+ unsigned char checksum = 0;
+ unsigned int idx, payload_size = 0;
+
+ /* Check if Freesync is supported. Return if false. If true,
+ * set the corresponding bit in the info packet
+ */
+ if (stream->freesync_ctx.supported == false)
+ return;
+
+ if (dc_is_hdmi_signal(stream->signal)) {
+
+ /* HEADER */
+
+ /* HB0 = Packet Type = 0x83 (Source Product
+ * Descriptor InfoFrame)
+ */
+ info_packet->hb0 = HDMI_INFOFRAME_TYPE_SPD;
+
+ /* HB1 = Version = 0x01 */
+ info_packet->hb1 = 0x01;
+
+ /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */
+ info_packet->hb2 = 0x08;
+
+ payload_size = 0x08;
+
+ } else if (dc_is_dp_signal(stream->signal)) {
+
+ /* HEADER */
+
+ /* HB0 = Secondary-data Packet ID = 0 - Only non-zero
+ * when used to associate audio related info packets
+ */
+ info_packet->hb0 = 0x00;
+
+ /* HB1 = Packet Type = 0x83 (Source Product
+ * Descriptor InfoFrame)
+ */
+ info_packet->hb1 = HDMI_INFOFRAME_TYPE_SPD;
+
+ /* HB2 = [Bits 7:0 = Least significant eight bits -
+ * For INFOFRAME, the value must be 1Bh]
+ */
+ info_packet->hb2 = 0x1B;
+
+ /* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x1]
+ * [Bits 1:0 = Most significant two bits = 0x00]
+ */
+ info_packet->hb3 = 0x04;
+
+ payload_size = 0x1B;
+ }
+
+ /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+ info_packet->sb[1] = 0x1A;
+
+ /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+ info_packet->sb[2] = 0x00;
+
+ /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+ info_packet->sb[3] = 0x00;
+
+ /* PB4 = Reserved */
+ info_packet->sb[4] = 0x00;
+
+ /* PB5 = Reserved */
+ info_packet->sb[5] = 0x00;
+
+ /* PB6 = [Bits 7:3 = Reserved] */
+ info_packet->sb[6] = 0x00;
+
+ if (stream->freesync_ctx.supported == true)
+ /* PB6 = [Bit 0 = FreeSync Supported] */
+ info_packet->sb[6] |= 0x01;
+
+ if (stream->freesync_ctx.enabled == true)
+ /* PB6 = [Bit 1 = FreeSync Enabled] */
+ info_packet->sb[6] |= 0x02;
+
+ if (stream->freesync_ctx.active == true)
+ /* PB6 = [Bit 2 = FreeSync Active] */
+ info_packet->sb[6] |= 0x04;
+
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ info_packet->sb[7] = (unsigned char) (stream->freesync_ctx.
+ min_refresh_in_micro_hz / 1000000);
+
+ /* PB8 = FreeSync Maximum refresh rate (Hz)
+ *
+ * Note: We do not use the maximum capable refresh rate
+ * of the panel, because we should never go above the field
+ * rate of the mode timing set.
+ */
+ info_packet->sb[8] = (unsigned char) (stream->freesync_ctx.
+ nominal_refresh_in_micro_hz / 1000000);
+
+ /* PB9 - PB27 = Reserved */
+ for (idx = 9; idx <= 27; idx++)
+ info_packet->sb[idx] = 0x00;
+
+ /* Calculate checksum */
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+ checksum += info_packet->hb3;
+
+ for (idx = 1; idx <= payload_size; idx++)
+ checksum += info_packet->sb[idx];
+
+ /* PB0 = Checksum (one byte complement) */
+ info_packet->sb[0] = (unsigned char) (0x100 - checksum);
+
+ info_packet->valid = true;
+}
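PB7 and PB8 carry the FreeSync refresh-rate range in whole hertz, so the micro-hertz values from the FreeSync context are integer-divided by 1,000,000 (48,000,000 uHz becomes 48 Hz; fractional hertz are truncated), and the capability bits in PB6 are ORed in as above. A standalone sketch of both steps (illustration only, not part of the patch):

#include <stdint.h>
#include <stdbool.h>

static uint8_t refresh_hz_from_micro_hz(unsigned int micro_hz)
{
	return (uint8_t)(micro_hz / 1000000); /* e.g. 60500000 -> 60 */
}

static uint8_t pack_freesync_pb6(bool supported, bool enabled, bool active)
{
	uint8_t pb6 = 0;

	if (supported)
		pb6 |= 0x01; /* Bit 0 = FreeSync Supported */
	if (enabled)
		pb6 |= 0x02; /* Bit 1 = FreeSync Enabled */
	if (active)
		pb6 |= 0x04; /* Bit 2 = FreeSync Active */

	return pb6;
}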
+
+static void set_hdr_static_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_plane_state *plane_state,
+ struct dc_stream_state *stream)
+{
+ uint16_t i = 0;
+ enum signal_type signal = stream->signal;
+ struct dc_hdr_static_metadata hdr_metadata;
+ uint32_t data;
+
+ if (!plane_state)
+ return;
+
+ hdr_metadata = plane_state->hdr_static_ctx;
+
+ if (!hdr_metadata.hdr_supported)
+ return;
+
+ if (dc_is_hdmi_signal(signal)) {
+ info_packet->valid = true;
+
+ info_packet->hb0 = 0x87;
+ info_packet->hb1 = 0x01;
+ info_packet->hb2 = 0x1A;
+ i = 1;
+ } else if (dc_is_dp_signal(signal)) {
+ info_packet->valid = true;
+
+ info_packet->hb0 = 0x00;
+ info_packet->hb1 = 0x87;
+ info_packet->hb2 = 0x1D;
+ info_packet->hb3 = (0x13 << 2);
+ i = 2;
+ }
+
+ data = hdr_metadata.is_hdr;
+ info_packet->sb[i++] = data ? 0x02 : 0x00;
+ info_packet->sb[i++] = 0x00;
+
+ data = hdr_metadata.chromaticity_green_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_green_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_blue_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_blue_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_red_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_red_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_white_point_x / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.chromaticity_white_point_y / 2;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.max_luminance;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.min_luminance;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.maximum_content_light_level;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ data = hdr_metadata.maximum_frame_average_light_level;
+ info_packet->sb[i++] = data & 0xFF;
+ info_packet->sb[i++] = (data & 0xFF00) >> 8;
+
+ if (dc_is_hdmi_signal(signal)) {
+ uint32_t checksum = 0;
+
+ checksum += info_packet->hb0;
+ checksum += info_packet->hb1;
+ checksum += info_packet->hb2;
+
+ for (i = 1; i <= info_packet->hb2; i++)
+ checksum += info_packet->sb[i];
+
+ info_packet->sb[0] = 0x100 - checksum;
+ } else if (dc_is_dp_signal(signal)) {
+ info_packet->sb[0] = 0x01;
+ info_packet->sb[1] = 0x1A;
+ }
+}
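dc_hdr_static_metadata (declared later in this patch, in dc.h) stores chromaticities in units of 0.00001, while the HDR infoframe payload built here carries them in units of 0.00002, which is why each value is divided by two before being split into two little-endian bytes. A standalone sketch of that conversion (illustration only, not part of the patch):

#include <stdint.h>

/* Illustration: 0.3127 stored as 31270 (0.00001 units) becomes 15635
 * (0.00002 units), emitted LSB first.
 */
static void encode_chromaticity(uint32_t value_in_0_00001_units,
				uint8_t *lsb, uint8_t *msb)
{
	uint32_t data = value_in_0_00001_units / 2;

	*lsb = data & 0xFF;
	*msb = (data & 0xFF00) >> 8;
}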
+
+static void set_vsc_info_packet(
+ struct encoder_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ unsigned int vscPacketRevision = 0;
+ unsigned int i;
+
+ if (stream->sink->link->psr_enabled)
+ vscPacketRevision = 2;
+
+ /* VSC packet not needed based on the features
+ * supported by this DP display
+ */
+ if (vscPacketRevision == 0)
+ return;
+
+ if (vscPacketRevision == 0x2) {
+ /* Secondary-data Packet ID = 0*/
+ info_packet->hb0 = 0x00;
+ /* 07h - Packet Type Value indicating Video
+ * Stream Configuration packet
+ */
+ info_packet->hb1 = 0x07;
+ /* 02h = VSC SDP supporting 3D stereo and PSR
+ * (applies to eDP v1.3 or higher).
+ */
+ info_packet->hb2 = 0x02;
+ /* 08h = VSC packet supporting 3D stereo + PSR
+ * (HB2 = 02h).
+ */
+ info_packet->hb3 = 0x08;
+
+ for (i = 0; i < 28; i++)
+ info_packet->sb[i] = 0;
+
+ info_packet->valid = true;
+ }
+
+ /* TODO: stereo 3D support and extended pixel encoding/colorimetry */
+}
+
+void dc_resource_state_destruct(struct dc_state *context)
+{
+ int i, j;
+
+ for (i = 0; i < context->stream_count; i++) {
+ for (j = 0; j < context->stream_status[i].plane_count; j++)
+ dc_plane_state_release(
+ context->stream_status[i].plane_states[j]);
+
+ context->stream_status[i].plane_count = 0;
+ dc_stream_release(context->streams[i]);
+ context->streams[i] = NULL;
+ }
+}
+
+/*
+ * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
+ * by the src_ctx
+ */
+void dc_resource_state_copy_construct(
+ const struct dc_state *src_ctx,
+ struct dc_state *dst_ctx)
+{
+ int i, j;
+ struct kref refcount = dst_ctx->refcount;
+
+ *dst_ctx = *src_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ }
+
+ for (i = 0; i < dst_ctx->stream_count; i++) {
+ dc_stream_retain(dst_ctx->streams[i]);
+ for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ dst_ctx->stream_status[i].plane_states[j]);
+ }
+
+ /* context refcount should not be overridden */
+ dst_ctx->refcount = refcount;
+
+}
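After the shallow struct copy, each pipe's top_pipe/bottom_pipe pointer still refers to the source context's pipe array, so the loop above re-targets them by pipe index into the destination array. A minimal standalone model of that fix-up (illustration only; as with src_ctx here, the source array must still be valid while re-targeting):

#include <stddef.h>

struct node {
	int idx;           /* index of this element within its array */
	struct node *peer; /* points at another element of the same array */
};

/* Illustration: dst[] was copied from src[], so dst[i].peer still points
 * into src[]; re-aim it at the matching element of dst[].
 */
static void retarget_peers(struct node *dst, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		if (dst[i].peer)
			dst[i].peer = &dst[dst[i].peer->idx];
}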
+
+struct clock_source *dc_resource_find_first_free_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; ++i) {
+ if (res_ctx->clock_source_ref_count[i] == 0)
+ return pool->clock_sources[i];
+ }
+
+ return NULL;
+}
+
+void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* default all packets to invalid */
+ info->avi.valid = false;
+ info->gamut.valid = false;
+ info->vendor.valid = false;
+ info->spd.valid = false;
+ info->hdrsmd.valid = false;
+ info->vsc.valid = false;
+
+ signal = pipe_ctx->stream->signal;
+
+ /* HDMI and DP have different info packets */
+ if (dc_is_hdmi_signal(signal)) {
+ set_avi_info_frame(&info->avi, pipe_ctx);
+
+ set_vendor_info_packet(&info->vendor, pipe_ctx->stream);
+
+ set_spd_info_packet(&info->spd, pipe_ctx->stream);
+
+ set_hdr_static_info_packet(&info->hdrsmd,
+ pipe_ctx->plane_state, pipe_ctx->stream);
+
+ } else if (dc_is_dp_signal(signal)) {
+ set_vsc_info_packet(&info->vsc, pipe_ctx->stream);
+
+ set_spd_info_packet(&info->spd, pipe_ctx->stream);
+
+ set_hdr_static_info_packet(&info->hdrsmd,
+ pipe_ctx->plane_state, pipe_ctx->stream);
+ }
+
+ patch_gamut_packet_checksum(&info->gamut);
+}
+
+enum dc_status resource_map_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ /* acquire new resources */
+ const struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ &context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)
+ || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ pipe_ctx->clock_source = pool->dp_clock_source;
+ else {
+ pipe_ctx->clock_source = NULL;
+
+ if (!dc->config.disable_disp_pll_sharing)
+ pipe_ctx->clock_source = resource_find_used_clk_src_for_sharing(
+ &context->res_ctx,
+ pipe_ctx);
+
+ if (pipe_ctx->clock_source == NULL)
+ pipe_ctx->clock_source =
+ dc_resource_find_first_free_pll(
+ &context->res_ctx,
+ pool);
+ }
+
+ if (pipe_ctx->clock_source == NULL)
+ return DC_NO_CLOCK_SOURCE_RESOURCE;
+
+ resource_reference_clock_source(
+ &context->res_ctx, pool,
+ pipe_ctx->clock_source);
+
+ return DC_OK;
+}
+
+/*
+ * Note: We need to disable the output if the clock source changes,
+ * since the VBIOS performs an optimization that is not applied if the
+ * PHY is reprogrammed while it is not already disabled.
+ */
+bool pipe_need_reprogram(
+ struct pipe_ctx *pipe_ctx_old,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx_old->stream)
+ return false;
+
+ if (pipe_ctx_old->stream->sink != pipe_ctx->stream->sink)
+ return true;
+
+ if (pipe_ctx_old->stream->signal != pipe_ctx->stream->signal)
+ return true;
+
+ if (pipe_ctx_old->stream_res.audio != pipe_ctx->stream_res.audio)
+ return true;
+
+ if (pipe_ctx_old->clock_source != pipe_ctx->clock_source
+ && pipe_ctx_old->stream != pipe_ctx->stream)
+ return true;
+
+ if (pipe_ctx_old->stream_res.stream_enc != pipe_ctx->stream_res.stream_enc)
+ return true;
+
+ if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
+ return true;
+
+
+ return false;
+}
+
+void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth)
+{
+ enum dc_dither_option option = stream->dither_option;
+ enum dc_pixel_encoding pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
+
+ if (option == DITHER_OPTION_DEFAULT) {
+ switch (stream->timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ option = DITHER_OPTION_SPATIAL6;
+ break;
+ case COLOR_DEPTH_888:
+ option = DITHER_OPTION_SPATIAL8;
+ break;
+ case COLOR_DEPTH_101010:
+ option = DITHER_OPTION_SPATIAL10;
+ break;
+ default:
+ option = DITHER_OPTION_DISABLE;
+ }
+ }
+
+ if (option == DITHER_OPTION_DISABLE)
+ return;
+
+ if (option == DITHER_OPTION_TRUN6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
+ } else if (option == DITHER_OPTION_TRUN8 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
+ } else if (option == DITHER_OPTION_TRUN10 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ }
+
+ /* special case - Formatter can only reduce by 4 bits at most.
+ * When reducing from 12 to 6 bits,
+ * HW recommends we use trunc with round mode
+ * (if we did nothing, trunc to 10 bits would be used)
+ * note that any 12->10 bit reduction is ignored prior to DCE8,
+ * as the input was 10 bits.
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_FM6) {
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
+ fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ fmt_bit_depth->flags.TRUNCATE_MODE = 1;
+ }
+
+ /* spatial dither
+ * note that spatial modes 1-3 are never used
+ */
+ if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL6 ||
+ option == DITHER_OPTION_TRUN8_SPATIAL6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
+ option == DITHER_OPTION_SPATIAL10 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM6) {
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
+ fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
+ fmt_bit_depth->flags.RGB_RANDOM =
+ (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ }
+
+ if (option == DITHER_OPTION_SPATIAL6 ||
+ option == DITHER_OPTION_SPATIAL8 ||
+ option == DITHER_OPTION_SPATIAL10) {
+ fmt_bit_depth->flags.FRAME_RANDOM = 0;
+ } else {
+ fmt_bit_depth->flags.FRAME_RANDOM = 1;
+ }
+
+ /* temporal dither (frame modulation) */
+ if (option == DITHER_OPTION_FM6 ||
+ option == DITHER_OPTION_SPATIAL8_FM6 ||
+ option == DITHER_OPTION_SPATIAL10_FM6 ||
+ option == DITHER_OPTION_TRUN10_FM6 ||
+ option == DITHER_OPTION_TRUN8_FM6 ||
+ option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
+ } else if (option == DITHER_OPTION_FM8 ||
+ option == DITHER_OPTION_SPATIAL10_FM8 ||
+ option == DITHER_OPTION_TRUN10_FM8) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
+ } else if (option == DITHER_OPTION_FM10) {
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
+ fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
+ }
+
+ fmt_bit_depth->pixel_encoding = pixel_encoding;
+}
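As a worked example of the branches above: DITHER_OPTION_SPATIAL8 on an RGB stream ends up with spatial dithering enabled at depth 1, highpass and RGB randomization on, frame random off, and neither truncation nor frame modulation enabled. Summarized as an annotated sketch (illustration only, not part of the patch):

/* Expected flags for option == DITHER_OPTION_SPATIAL8, RGB pixel encoding:
 *   SPATIAL_DITHER_ENABLED   = 1
 *   SPATIAL_DITHER_DEPTH     = 1   (dither to 8 bpc)
 *   HIGHPASS_RANDOM          = 1
 *   RGB_RANDOM               = 1   (RGB encoding)
 *   FRAME_RANDOM             = 0   (pure spatial option)
 *   TRUNCATE_ENABLED         = 0
 *   FRAME_MODULATION_ENABLED = 0
 */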
+
+enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
+{
+ struct dc *core_dc = dc;
+ struct dc_link *link = stream->sink->link;
+ struct timing_generator *tg = core_dc->res_pool->timing_generators[0];
+ enum dc_status res = DC_OK;
+
+ calculate_phy_pix_clks(stream);
+
+ if (!tg->funcs->validate_timing(tg, &stream->timing))
+ res = DC_FAIL_CONTROLLER_VALIDATE;
+
+ if (res == DC_OK)
+ if (!link->link_enc->funcs->validate_output_with_stream(
+ link->link_enc, stream))
+ res = DC_FAIL_ENC_VALIDATE;
+
+ /* TODO: validate audio ASIC caps, encoder */
+
+ if (res == DC_OK)
+ res = dc_link_validate_mode_timing(stream,
+ link,
+ &stream->timing);
+
+ return res;
+}
+
+enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state)
+{
+ enum dc_status res = DC_OK;
+
+ /* TODO: for now this validates the pixel format only */
+ if (dc->res_pool->funcs->validate_plane)
+ return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps);
+
+ return res;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
new file mode 100644
index 000000000000..25fae38409ab
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+
+static void destruct(struct dc_sink *sink)
+{
+ if (sink->dc_container_id) {
+ kfree(sink->dc_container_id);
+ sink->dc_container_id = NULL;
+ }
+}
+
+static bool construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)
+{
+
+ struct dc_link *link = init_params->link;
+
+ if (!link)
+ return false;
+
+ sink->sink_signal = init_params->sink_signal;
+ sink->link = link;
+ sink->ctx = link->ctx;
+ sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
+ sink->converter_disable_audio = init_params->converter_disable_audio;
+ sink->dc_container_id = NULL;
+
+ return true;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+void dc_sink_retain(struct dc_sink *sink)
+{
+ kref_get(&sink->refcount);
+}
+
+static void dc_sink_free(struct kref *kref)
+{
+ struct dc_sink *sink = container_of(kref, struct dc_sink, refcount);
+ destruct(sink);
+ kfree(sink);
+}
+
+void dc_sink_release(struct dc_sink *sink)
+{
+ kref_put(&sink->refcount, dc_sink_free);
+}
+
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
+{
+ struct dc_sink *sink = kzalloc(sizeof(*sink), GFP_KERNEL);
+
+ if (!sink)
+ goto alloc_fail;
+
+ if (!construct(sink, init_params))
+ goto construct_fail;
+
+ kref_init(&sink->refcount);
+
+ return sink;
+
+construct_fail:
+ kfree(sink);
+
+alloc_fail:
+ return NULL;
+}
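dc_sink objects are reference-counted with a kref: dc_sink_create() leaves the count at one, dc_sink_retain() bumps it, and the final dc_sink_release() runs destruct() and frees the object. A hedged usage sketch, assuming a populated struct dc_sink_init_data (illustration only, not part of the patch):

/* Illustration: typical create/retain/release lifecycle. */
static void sink_lifecycle_example(const struct dc_sink_init_data *init)
{
	struct dc_sink *sink = dc_sink_create(init);

	if (!sink)
		return;

	dc_sink_retain(sink);  /* a second holder takes a reference */
	dc_sink_release(sink); /* second holder drops it */
	dc_sink_release(sink); /* creator's reference: sink is freed here */
}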
+
+/*******************************************************************************
+ * Protected functions - visible only inside of DC (not visible in DM)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
new file mode 100644
index 000000000000..e230cc44a0a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "resource.h"
+#include "ipp.h"
+#include "timing_generator.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
+static void update_stream_signal(struct dc_stream_state *stream)
+{
+ if (stream->output_signal == SIGNAL_TYPE_NONE) {
+ struct dc_sink *dc_sink = stream->sink;
+
+ if (dc_sink->sink_signal == SIGNAL_TYPE_NONE)
+ stream->signal = stream->sink->link->connector_signal;
+ else
+ stream->signal = dc_sink->sink_signal;
+ } else {
+ stream->signal = stream->output_signal;
+ }
+
+ if (dc_is_dvi_signal(stream->signal)) {
+ if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
+ stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ else
+ stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+}
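The DVI branch above promotes the signal to dual link whenever the pixel clock exceeds TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST (297,000 kHz), unless the sink has explicitly reported itself as single-link only. A standalone sketch of that decision, with a hypothetical helper name (illustration only, not part of the patch):

#include <stdbool.h>

#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000

static bool needs_dual_link_dvi(int pix_clk_khz, bool sink_single_link_only)
{
	return pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
	       !sink_single_link_only;
}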
+
+static void construct(struct dc_stream_state *stream,
+ struct dc_sink *dc_sink_data)
+{
+ uint32_t i = 0;
+
+ stream->sink = dc_sink_data;
+ stream->ctx = stream->sink->ctx;
+
+ dc_sink_retain(dc_sink_data);
+
+ /* Copy audio modes */
+ /* TODO - Remove this translation */
+ for (i = 0; i < (dc_sink_data->edid_caps.audio_mode_count); i++) {
+ stream->audio_info.modes[i].channel_count = dc_sink_data->edid_caps.audio_modes[i].channel_count;
+ stream->audio_info.modes[i].format_code = dc_sink_data->edid_caps.audio_modes[i].format_code;
+ stream->audio_info.modes[i].sample_rates.all = dc_sink_data->edid_caps.audio_modes[i].sample_rate;
+ stream->audio_info.modes[i].sample_size = dc_sink_data->edid_caps.audio_modes[i].sample_size;
+ }
+ stream->audio_info.mode_count = dc_sink_data->edid_caps.audio_mode_count;
+ stream->audio_info.audio_latency = dc_sink_data->edid_caps.audio_latency;
+ stream->audio_info.video_latency = dc_sink_data->edid_caps.video_latency;
+ memmove(
+ stream->audio_info.display_name,
+ dc_sink_data->edid_caps.display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+ stream->audio_info.manufacture_id = dc_sink_data->edid_caps.manufacturer_id;
+ stream->audio_info.product_id = dc_sink_data->edid_caps.product_id;
+ stream->audio_info.flags.all = dc_sink_data->edid_caps.speaker_flags;
+
+ if (dc_sink_data->dc_container_id != NULL) {
+ struct dc_container_id *dc_container_id = dc_sink_data->dc_container_id;
+
+ stream->audio_info.port_id[0] = dc_container_id->portId[0];
+ stream->audio_info.port_id[1] = dc_container_id->portId[1];
+ } else {
+ /* TODO: the Windows DM provides real values here; other DMs
+  * still need to un-hardcode port_id.
+  */
+ stream->audio_info.port_id[0] = 0x5558859e;
+ stream->audio_info.port_id[1] = 0xd989449;
+ }
+
+ /* EDID CAP translation for HDMI 2.0 */
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
+
+ stream->status.link = stream->sink->link;
+
+ update_stream_signal(stream);
+}
+
+static void destruct(struct dc_stream_state *stream)
+{
+ dc_sink_release(stream->sink);
+ if (stream->out_transfer_func != NULL) {
+ dc_transfer_func_release(
+ stream->out_transfer_func);
+ stream->out_transfer_func = NULL;
+ }
+}
+
+void dc_stream_retain(struct dc_stream_state *stream)
+{
+ kref_get(&stream->refcount);
+}
+
+static void dc_stream_free(struct kref *kref)
+{
+ struct dc_stream_state *stream = container_of(kref, struct dc_stream_state, refcount);
+
+ destruct(stream);
+ kfree(stream);
+}
+
+void dc_stream_release(struct dc_stream_state *stream)
+{
+ if (stream)
+ kref_put(&stream->refcount, dc_stream_free);
+}
+
+struct dc_stream_state *dc_create_stream_for_sink(
+ struct dc_sink *sink)
+{
+ struct dc_stream_state *stream;
+
+ if (sink == NULL)
+ return NULL;
+
+ stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
+ if (stream == NULL)
+ return NULL;
+
+ construct(stream, sink);
+
+ kref_init(&stream->refcount);
+
+ return stream;
+}
+
+struct dc_stream_status *dc_stream_get_status(
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+ struct dc *dc = stream->ctx->dc;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (stream == dc->current_state->streams[i])
+ return &dc->current_state->stream_status[i];
+ }
+
+ return NULL;
+}
+
+/**
+ * Update the cursor attributes and set cursor surface address
+ */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (!stream) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+ if (!attributes) {
+ dm_error("DC: attributes is NULL!\n");
+ return false;
+ }
+
+ if (attributes->address.quad_part == 0) {
+ dm_output_to_console("DC: Cursor address is 0!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ continue;
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ continue;
+
+
+ if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
+ pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
+ pipe_ctx->plane_res.ipp, attributes);
+
+ if (pipe_ctx->plane_res.hubp != NULL &&
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.hubp, attributes);
+
+ if (pipe_ctx->plane_res.mi != NULL &&
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.mi, attributes);
+
+
+ if (pipe_ctx->plane_res.xfm != NULL &&
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.xfm, attributes);
+
+ if (pipe_ctx->plane_res.dpp != NULL &&
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.dpp, attributes);
+ }
+
+ stream->cursor_attributes = *attributes;
+
+ return true;
+}
+
+bool dc_stream_set_cursor_position(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (!stream) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ if (!position) {
+ dm_error("DC: cursor position is NULL!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+ struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_cursor_position pos_cpy = *position;
+ struct dc_cursor_mi_param param = {
+ .pixel_clk_khz = stream->timing.pix_clk_khz,
+ .ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
+ .viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
+ .viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
+ .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
+ };
+
+ if (pipe_ctx->stream != stream ||
+ (!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
+ !pipe_ctx->plane_state ||
+ (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+ continue;
+
+ if (pipe_ctx->plane_state->address.type
+ == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pos_cpy.enable = false;
+
+ if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
+ pos_cpy.enable = false;
+
+
+ if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
+ ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
+
+ if (mi != NULL && mi->funcs->set_cursor_position != NULL)
+ mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
+
+ if (!hubp)
+ continue;
+
+ if (hubp->funcs->set_cursor_position != NULL)
+ hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
+
+ if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
+ dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
+
+ }
+
+ return true;
+}
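Both cursor entry points validate their arguments and then fan out to whichever hardware blocks (ipp, mi, hubp, xfm, dpp) exist on each pipe that drives the stream. A hedged usage sketch from the DM side, assuming struct dc_cursor_position exposes x/y fields alongside the enable flag used above (illustration only, not part of the patch):

/* Illustration: program cursor attributes, then move the cursor. */
static void cursor_usage_example(struct dc_stream_state *stream,
				 const struct dc_cursor_attributes *attrs)
{
	struct dc_cursor_position pos = { 0 };

	pos.x = 100;       /* assumed field names */
	pos.y = 50;
	pos.enable = true; /* 'enable' is used by the code above */

	if (!dc_stream_set_cursor_attributes(stream, attrs))
		return;

	dc_stream_set_cursor_position(stream, &pos);
}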
+
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
+{
+ uint8_t i;
+ struct dc *core_dc = stream->ctx->dc;
+ struct resource_context *res_ctx =
+ &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+ if (res_ctx->pipe_ctx[i].stream != stream)
+ continue;
+
+ return tg->funcs->get_frame_count(tg);
+ }
+
+ return 0;
+}
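dc_stream_get_vblank_counter() returns the timing generator's frame count for whichever pipe currently drives the stream, or zero if none does. A hedged usage sketch for detecting that at least one new frame has been scanned out since the last check (illustration only, not part of the patch):

/* Illustration: compare against a cached counter value. */
static bool new_frame_since(const struct dc_stream_state *stream,
			    uint32_t *last_count)
{
	uint32_t now = dc_stream_get_vblank_counter(stream);
	bool changed = (now != *last_count);

	*last_count = now;
	return changed;
}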
+
+bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ uint8_t i;
+ bool ret = false;
+ struct dc *core_dc = stream->ctx->dc;
+ struct resource_context *res_ctx =
+ &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
+
+ if (res_ctx->pipe_ctx[i].stream != stream)
+ continue;
+
+ tg->funcs->get_scanoutpos(tg,
+ v_blank_start,
+ v_blank_end,
+ h_position,
+ v_position);
+
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
+
+void dc_stream_log(
+ const struct dc_stream_state *stream,
+ struct dal_logger *dm_logger,
+ enum dc_log_type log_type)
+{
+
+ dm_logger_write(dm_logger,
+ log_type,
+ "core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d, colorSpace:%d\n",
+ stream,
+ stream->src.x,
+ stream->src.y,
+ stream->src.width,
+ stream->src.height,
+ stream->dst.x,
+ stream->dst.y,
+ stream->dst.width,
+ stream->dst.height,
+ stream->output_color_space);
+ dm_logger_write(dm_logger,
+ log_type,
+ "\tpix_clk_khz: %d, h_total: %d, v_total: %d, pixelencoder:%d, displaycolorDepth:%d\n",
+ stream->timing.pix_clk_khz,
+ stream->timing.h_total,
+ stream->timing.v_total,
+ stream->timing.pixel_encoding,
+ stream->timing.display_color_depth);
+ dm_logger_write(dm_logger,
+ log_type,
+ "\tsink name: %s, serial: %d\n",
+ stream->sink->edid_caps.display_name,
+ stream->sink->edid_caps.serial_number);
+ dm_logger_write(dm_logger,
+ log_type,
+ "\tlink: %d\n",
+ stream->sink->link->link_index);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
new file mode 100644
index 000000000000..ade5b8ee9c3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* DC interface (public) */
+#include "dm_services.h"
+#include "dc.h"
+
+/* DC core (private) */
+#include "core_types.h"
+#include "transform.h"
+#include "dpp.h"
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+{
+ plane_state->ctx = ctx;
+}
+
+static void destruct(struct dc_plane_state *plane_state)
+{
+ if (plane_state->gamma_correction != NULL)
+ dc_gamma_release(&plane_state->gamma_correction);
+ if (plane_state->in_transfer_func != NULL) {
+ dc_transfer_func_release(
+ plane_state->in_transfer_func);
+ plane_state->in_transfer_func = NULL;
+ }
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
+ uint32_t controller_id)
+{
+ plane_state->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
+ /*register_flip_interrupt(surface);*/
+}
+
+struct dc_plane_state *dc_create_plane_state(struct dc *dc)
+{
+ struct dc *core_dc = dc;
+
+ struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state),
+ GFP_KERNEL);
+
+ if (!plane_state)
+ return NULL;
+
+ kref_init(&plane_state->refcount);
+ construct(core_dc->ctx, plane_state);
+
+ return plane_state;
+}
+
+const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state)
+{
+ const struct dc_plane_status *plane_status;
+ struct dc *core_dc;
+ int i;
+
+ if (!plane_state ||
+ !plane_state->ctx ||
+ !plane_state->ctx->dc) {
+ ASSERT(0);
+ return NULL; /* remove this if the assert above is never hit */
+ }
+
+ plane_status = &plane_state->status;
+ core_dc = plane_state->ctx->dc;
+
+ if (core_dc->current_state == NULL)
+ return NULL;
+
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx =
+ &core_dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ core_dc->hwss.update_pending_status(pipe_ctx);
+ }
+
+ return plane_status;
+}
+
+void dc_plane_state_retain(struct dc_plane_state *plane_state)
+{
+ kref_get(&plane_state->refcount);
+}
+
+static void dc_plane_state_free(struct kref *kref)
+{
+ struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount);
+ destruct(plane_state);
+ kfree(plane_state);
+}
+
+void dc_plane_state_release(struct dc_plane_state *plane_state)
+{
+ kref_put(&plane_state->refcount, dc_plane_state_free);
+}
+
+void dc_gamma_retain(struct dc_gamma *gamma)
+{
+ kref_get(&gamma->refcount);
+}
+
+static void dc_gamma_free(struct kref *kref)
+{
+ struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount);
+ kfree(gamma);
+}
+
+void dc_gamma_release(struct dc_gamma **gamma)
+{
+ kref_put(&(*gamma)->refcount, dc_gamma_free);
+ *gamma = NULL;
+}
+
+struct dc_gamma *dc_create_gamma(void)
+{
+ struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL);
+
+ if (gamma == NULL)
+ goto alloc_fail;
+
+ kref_init(&gamma->refcount);
+ return gamma;
+
+alloc_fail:
+ return NULL;
+}
+
+void dc_transfer_func_retain(struct dc_transfer_func *tf)
+{
+ kref_get(&tf->refcount);
+}
+
+static void dc_transfer_func_free(struct kref *kref)
+{
+ struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount);
+ kfree(tf);
+}
+
+void dc_transfer_func_release(struct dc_transfer_func *tf)
+{
+ kref_put(&tf->refcount, dc_transfer_func_free);
+}
+
+struct dc_transfer_func *dc_create_transfer_func(void)
+{
+ struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL);
+
+ if (tf == NULL)
+ goto alloc_fail;
+
+ kref_init(&tf->refcount);
+
+ return tf;
+
+alloc_fail:
+ return NULL;
+}
+
+
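dc_transfer_func (like dc_gamma and dc_plane_state above) follows the same kref pattern as the other DC objects: create initializes the count, retain/release adjust it, and the release callback frees the memory. A hedged usage sketch that builds a predefined sRGB transfer function using the type and enum fields declared later in dc.h (illustration only, not part of the patch):

/* Illustration: caller owns the returned reference and must call
 * dc_transfer_func_release() when done with it.
 */
static struct dc_transfer_func *make_srgb_tf(void)
{
	struct dc_transfer_func *tf = dc_create_transfer_func();

	if (!tf)
		return NULL;

	tf->type = TF_TYPE_PREDEFINED;
	tf->tf = TRANSFER_FUNCTION_SRGB;

	return tf;
}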
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
new file mode 100644
index 000000000000..9d8f4a55c74e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -0,0 +1,1103 @@
+/*
+ * Copyright 2012-14 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_INTERFACE_H_
+#define DC_INTERFACE_H_
+
+#include "dc_types.h"
+#include "grph_object_defs.h"
+#include "logger_types.h"
+#include "gpio_types.h"
+#include "link_service_types.h"
+#include "grph_object_ctrl_defs.h"
+#include <inc/hw/opp.h>
+
+#include "inc/hw_sequencer.h"
+#include "inc/compressor.h"
+#include "dml/display_mode_lib.h"
+
+#define DC_VER "3.1.07"
+
+#define MAX_SURFACES 3
+#define MAX_STREAMS 6
+#define MAX_SINKS_PER_LINK 4
+
+
+/*******************************************************************************
+ * Display Core Interfaces
+ ******************************************************************************/
+struct dc_caps {
+ uint32_t max_streams;
+ uint32_t max_links;
+ uint32_t max_audios;
+ uint32_t max_slave_planes;
+ uint32_t max_planes;
+ uint32_t max_downscale_ratio;
+ uint32_t i2c_speed_in_khz;
+ unsigned int max_cursor_size;
+ unsigned int max_video_width;
+ bool dcc_const_color;
+ bool dynamic_audio;
+};
+
+struct dc_dcc_surface_param {
+ struct dc_size surface_size;
+ enum surface_pixel_format format;
+ enum swizzle_mode_values swizzle_mode;
+ enum dc_scan_direction scan;
+};
+
+struct dc_dcc_setting {
+ unsigned int max_compressed_blk_size;
+ unsigned int max_uncompressed_blk_size;
+ bool independent_64b_blks;
+};
+
+struct dc_surface_dcc_cap {
+ union {
+ struct {
+ struct dc_dcc_setting rgb;
+ } grph;
+
+ struct {
+ struct dc_dcc_setting luma;
+ struct dc_dcc_setting chroma;
+ } video;
+ };
+
+ bool capable;
+ bool const_color_support;
+};
+
+struct dc_static_screen_events {
+ bool cursor_update;
+ bool surface_update;
+ bool overlay_update;
+};
+
+/* Forward declaration*/
+struct dc;
+struct dc_plane_state;
+struct dc_state;
+
+struct dc_cap_funcs {
+ bool (*get_dcc_compression_cap)(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output);
+};
+
+struct dc_stream_state_funcs {
+ bool (*adjust_vmin_vmax)(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ int vmin,
+ int vmax);
+ bool (*get_crtc_position)(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ unsigned int *v_pos,
+ unsigned int *nom_v_pos);
+
+ bool (*set_gamut_remap)(struct dc *dc,
+ const struct dc_stream_state *stream);
+
+ bool (*program_csc_matrix)(struct dc *dc,
+ struct dc_stream_state *stream);
+
+ void (*set_static_screen_events)(struct dc *dc,
+ struct dc_stream_state **stream,
+ int num_streams,
+ const struct dc_static_screen_events *events);
+
+ void (*set_dither_option)(struct dc_stream_state *stream,
+ enum dc_dither_option option);
+
+ void (*set_dpms)(struct dc *dc,
+ struct dc_stream_state *stream,
+ bool dpms_off);
+};
+
+struct link_training_settings;
+
+struct dc_link_funcs {
+ void (*set_drive_settings)(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link);
+ void (*perform_link_training)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+ void (*set_preferred_link_settings)(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link);
+ void (*enable_hpd)(const struct dc_link *link);
+ void (*disable_hpd)(const struct dc_link *link);
+ void (*set_test_pattern)(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+};
+
+/* Structure to hold configuration flags set by dm at dc creation. */
+struct dc_config {
+ bool gpu_vm_support;
+ bool disable_disp_pll_sharing;
+};
+
+enum dcc_option {
+ DCC_ENABLE = 0,
+ DCC_DISABLE = 1,
+ DCC_HALF_REQ_DISALBE = 2,
+};
+
+enum pipe_split_policy {
+ MPC_SPLIT_DYNAMIC = 0,
+ MPC_SPLIT_AVOID = 1,
+ MPC_SPLIT_AVOID_MULT_DISP = 2,
+};
+
+enum wm_report_mode {
+ WM_REPORT_DEFAULT = 0,
+ WM_REPORT_OVERRIDE = 1,
+};
+
+struct dc_debug {
+ bool surface_visual_confirm;
+ bool sanity_checks;
+ bool max_disp_clk;
+ bool surface_trace;
+ bool timing_trace;
+ bool clock_trace;
+ bool validation_trace;
+
+ /* stutter efficiency related */
+ bool disable_stutter;
+ bool use_max_lb;
+ enum dcc_option disable_dcc;
+ enum pipe_split_policy pipe_split_policy;
+ bool force_single_disp_pipe_split;
+ bool voltage_align_fclk;
+
+ bool disable_dfs_bypass;
+ bool disable_dpp_power_gate;
+ bool disable_hubp_power_gate;
+ bool disable_pplib_wm_range;
+ enum wm_report_mode pplib_wm_report_mode;
+ unsigned int min_disp_clk_khz;
+ int sr_exit_time_dpm0_ns;
+ int sr_enter_plus_exit_time_dpm0_ns;
+ int sr_exit_time_ns;
+ int sr_enter_plus_exit_time_ns;
+ int urgent_latency_ns;
+ int percent_of_ideal_drambw;
+ int dram_clock_change_latency_ns;
+ int always_scale;
+ bool disable_pplib_clock_request;
+ bool disable_clock_gate;
+ bool disable_dmcu;
+ bool disable_psr;
+ bool force_abm_enable;
+ bool disable_hbup_pg;
+ bool disable_dpp_pg;
+ bool disable_stereo_support;
+ bool vsr_support;
+ bool performance_trace;
+};
+struct dc_state;
+struct resource_pool;
+struct dce_hwseq;
+struct dc {
+ struct dc_caps caps;
+ struct dc_cap_funcs cap_funcs;
+ struct dc_stream_state_funcs stream_funcs;
+ struct dc_link_funcs link_funcs;
+ struct dc_config config;
+ struct dc_debug debug;
+
+ struct dc_context *ctx;
+
+ uint8_t link_count;
+ struct dc_link *links[MAX_PIPES * 2];
+
+ struct dc_state *current_state;
+ struct resource_pool *res_pool;
+
+ /* Display Engine Clock levels */
+ struct dm_pp_clock_levels sclk_lvls;
+
+ /* Inputs into BW and WM calculations. */
+ struct bw_calcs_dceip *bw_dceip;
+ struct bw_calcs_vbios *bw_vbios;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+ struct display_mode_lib dml;
+#endif
+
+ /* HW functions */
+ struct hw_sequencer_funcs hwss;
+ struct dce_hwseq *hwseq;
+
+ /* temp store of dm_pp_display_configuration
+ * to compare to see if display config changed
+ */
+ struct dm_pp_display_configuration prev_display_config;
+
+ /* FBC compressor */
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ struct compressor *fbc_compressor;
+#endif
+};
+
+enum frame_buffer_mode {
+ FRAME_BUFFER_MODE_LOCAL_ONLY = 0,
+ FRAME_BUFFER_MODE_ZFB_ONLY,
+ FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL,
+};
+
+struct dchub_init_data {
+ int64_t zfb_phys_addr_base;
+ int64_t zfb_mc_base_addr;
+ uint64_t zfb_size_in_byte;
+ enum frame_buffer_mode fb_mode;
+ bool dchub_initialzied;
+ bool dchub_info_valid;
+};
+
+struct dc_init_data {
+ struct hw_asic_id asic_id;
+ void *driver; /* ctx */
+ struct cgs_device *cgs_device;
+
+ int num_virtual_links;
+ /*
+ * If 'vbios_override' is not NULL, it will be used instead
+ * of the real VBIOS. Intended use is diagnostics on FPGA.
+ */
+ struct dc_bios *vbios_override;
+ enum dce_environment dce_environment;
+
+ struct dc_config flags;
+ uint32_t log_mask;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint64_t fbc_gpu_addr;
+#endif
+};
+
+struct dc *dc_create(const struct dc_init_data *init_params);
+
+void dc_destroy(struct dc **dc);
+
+/*******************************************************************************
+ * Surface Interfaces
+ ******************************************************************************/
+
+enum {
+ TRANSFER_FUNC_POINTS = 1025
+};
+
+// Moved here from color module for linux
+enum color_transfer_func {
+ transfer_func_unknown,
+ transfer_func_srgb,
+ transfer_func_bt709,
+ transfer_func_pq2084,
+ transfer_func_pq2084_interim,
+ transfer_func_linear_0_1,
+ transfer_func_linear_0_125,
+ transfer_func_dolbyvision,
+ transfer_func_gamma_22,
+ transfer_func_gamma_26
+};
+
+enum color_color_space {
+ color_space_unsupported,
+ color_space_srgb,
+ color_space_bt601,
+ color_space_bt709,
+ color_space_xv_ycc_bt601,
+ color_space_xv_ycc_bt709,
+ color_space_xr_rgb,
+ color_space_bt2020,
+ color_space_adobe,
+ color_space_dci_p3,
+ color_space_sc_rgb_ms_ref,
+ color_space_display_native,
+ color_space_app_ctrl,
+ color_space_dolby_vision,
+ color_space_custom_coordinates
+};
+
+struct dc_hdr_static_metadata {
+ /* display chromaticities and white point in units of 0.00001 */
+ unsigned int chromaticity_green_x;
+ unsigned int chromaticity_green_y;
+ unsigned int chromaticity_blue_x;
+ unsigned int chromaticity_blue_y;
+ unsigned int chromaticity_red_x;
+ unsigned int chromaticity_red_y;
+ unsigned int chromaticity_white_point_x;
+ unsigned int chromaticity_white_point_y;
+
+ uint32_t min_luminance;
+ uint32_t max_luminance;
+ uint32_t maximum_content_light_level;
+ uint32_t maximum_frame_average_light_level;
+
+ bool hdr_supported;
+ bool is_hdr;
+};
+
+enum dc_transfer_func_type {
+ TF_TYPE_PREDEFINED,
+ TF_TYPE_DISTRIBUTED_POINTS,
+ TF_TYPE_BYPASS
+};
+
+struct dc_transfer_func_distributed_points {
+ struct fixed31_32 red[TRANSFER_FUNC_POINTS];
+ struct fixed31_32 green[TRANSFER_FUNC_POINTS];
+ struct fixed31_32 blue[TRANSFER_FUNC_POINTS];
+
+ uint16_t end_exponent;
+ uint16_t x_point_at_y1_red;
+ uint16_t x_point_at_y1_green;
+ uint16_t x_point_at_y1_blue;
+};
+
+enum dc_transfer_func_predefined {
+ TRANSFER_FUNCTION_SRGB,
+ TRANSFER_FUNCTION_BT709,
+ TRANSFER_FUNCTION_PQ,
+ TRANSFER_FUNCTION_LINEAR,
+};
+
+struct dc_transfer_func {
+ struct kref refcount;
+ struct dc_transfer_func_distributed_points tf_pts;
+ enum dc_transfer_func_type type;
+ enum dc_transfer_func_predefined tf;
+ struct dc_context *ctx;
+};
+
+/*
+ * This structure is filled in by dc_plane_get_status() and contains
+ * the last requested address and the currently active address, so the
+ * caller can determine if there are any outstanding flips.
+ */
+struct dc_plane_status {
+ struct dc_plane_address requested_address;
+ struct dc_plane_address current_address;
+ bool is_flip_pending;
+ bool is_right_eye;
+};
+
+struct dc_plane_state {
+ struct dc_plane_address address;
+ struct scaling_taps scaling_quality;
+ struct rect src_rect;
+ struct rect dst_rect;
+ struct rect clip_rect;
+
+ union plane_size plane_size;
+ union dc_tiling_info tiling_info;
+
+ struct dc_plane_dcc_param dcc;
+ struct dc_hdr_static_metadata hdr_static_ctx;
+
+ struct dc_gamma *gamma_correction;
+ struct dc_transfer_func *in_transfer_func;
+
+ // sourceContentAttribute cache
+ bool is_source_input_valid;
+ struct dc_hdr_static_metadata source_input_mastering_info;
+ enum color_color_space source_input_color_space;
+ enum color_transfer_func source_input_tf;
+
+ enum dc_color_space color_space;
+ enum surface_pixel_format format;
+ enum dc_rotation_angle rotation;
+ enum plane_stereo_format stereo_format;
+
+ bool per_pixel_alpha;
+ bool visible;
+ bool flip_immediate;
+ bool horizontal_mirror;
+
+ /* private to DC core */
+ struct dc_plane_status status;
+ struct dc_context *ctx;
+
+ /* private to dc_surface.c */
+ enum dc_irq_source irq_source;
+ struct kref refcount;
+};
+
+struct dc_plane_info {
+ union plane_size plane_size;
+ union dc_tiling_info tiling_info;
+ struct dc_plane_dcc_param dcc;
+ enum surface_pixel_format format;
+ enum dc_rotation_angle rotation;
+ enum plane_stereo_format stereo_format;
+ enum dc_color_space color_space; /*todo: wrong place, fits in scaling info*/
+ bool horizontal_mirror;
+ bool visible;
+ bool per_pixel_alpha;
+};
+
+struct dc_scaling_info {
+ struct rect src_rect;
+ struct rect dst_rect;
+ struct rect clip_rect;
+ struct scaling_taps scaling_quality;
+};
+
+struct dc_surface_update {
+ struct dc_plane_state *surface;
+
+ /* isr safe update parameters. null means no updates */
+ struct dc_flip_addrs *flip_addr;
+ struct dc_plane_info *plane_info;
+ struct dc_scaling_info *scaling_info;
+ /* The following updates require allocation/sleeping/spinlocks and are
+ * therefore not ISR safe; NULL means no update.
+ */
+ /* gamma TO BE REMOVED */
+ struct dc_gamma *gamma;
+ struct dc_transfer_func *in_transfer_func;
+ struct dc_hdr_static_metadata *hdr_static_metadata;
+};
+
+/*
+ * Create a new plane state (surface) with default parameters.
+ */
+struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state);
+
+void dc_plane_state_retain(struct dc_plane_state *plane_state);
+void dc_plane_state_release(struct dc_plane_state *plane_state);
+
+void dc_gamma_retain(struct dc_gamma *dc_gamma);
+void dc_gamma_release(struct dc_gamma **dc_gamma);
+struct dc_gamma *dc_create_gamma(void);
+
+void dc_transfer_func_retain(struct dc_transfer_func *dc_tf);
+void dc_transfer_func_release(struct dc_transfer_func *dc_tf);
+struct dc_transfer_func *dc_create_transfer_func(void);
+
+/*
+ * This structure holds a surface address. There could be multiple addresses
+ * in cases such as Stereo 3D, Planar YUV, etc. Other per-flip attributes such
+ * as frame durations and DCC format can also be set.
+ */
+struct dc_flip_addrs {
+ struct dc_plane_address address;
+ bool flip_immediate;
+ /* TODO: add flip duration for FreeSync */
+};
+
+bool dc_post_update_surfaces_to_stream(
+ struct dc *dc);
+
+/* Surface update type is used by dc_update_surfaces_and_stream
+ * The update type is determined at the very beginning of the function based
+ * on parameters passed in and decides how much programming (or updating) is
+ * going to be done during the call.
+ *
+ * UPDATE_TYPE_FAST is used for really fast updates that do not require much
+ * logical calculations or hardware register programming. This update MUST be
+ * ISR safe on windows. Currently fast update will only be used to flip surface
+ * address.
+ *
+ * UPDATE_TYPE_MED is used for slower updates which require significant hw
+ * re-programming however do not affect bandwidth consumption or clock
+ * requirements. At present, this is the level at which front end updates
+ * that do not require us to run bw_calcs happen. These are in/out transfer func
+ * updates, viewport offset changes, recout size changes and pixel depth changes.
+ * This update can be done at ISR, but we want to minimize how often this happens.
+ *
+ * UPDATE_TYPE_FULL is slow. Really slow. This requires us to recalculate our
+ * bandwidth and clocks, possibly rearrange some pipes and reprogram anything front
+ * end related. Any time viewport dimensions, recout dimensions, scaling ratios or
+ * gamma need to be adjusted or pipe needs to be turned on (or disconnected) we do
+ * a full update. This cannot be done at ISR level and should be a rare event.
+ * Unless someone is stress testing mpo enter/exit, playing with colour or adjusting
+ * underscan we don't expect to see this call at all.
+ */
+
+enum surface_update_type {
+ UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */
+ UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/
+ UPDATE_TYPE_FULL, /* may need to shuffle resources */
+};
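Concretely, a dc_surface_update carrying only a new flip address (all other pointers NULL, per the "null means no updates" convention above) is the kind of change the comment classifies as UPDATE_TYPE_FAST; attaching, say, an in_transfer_func pushes it into the slower categories. A hedged sketch of building such a flip-only update (illustration only, not part of the patch):

/* Illustration: minimal, ISR-safe style update carrying only an address. */
static struct dc_surface_update flip_only_update(struct dc_plane_state *plane,
						 struct dc_flip_addrs *flip)
{
	struct dc_surface_update upd = { 0 };

	upd.surface = plane;
	upd.flip_addr = flip; /* everything else stays NULL: no updates */

	return upd;
}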
+
+/*******************************************************************************
+ * Stream Interfaces
+ ******************************************************************************/
+
+struct dc_stream_status {
+ int primary_otg_inst;
+ int stream_enc_inst;
+ int plane_count;
+ struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+
+ /*
+ * link this stream passes through
+ */
+ struct dc_link *link;
+};
+
+struct dc_stream_state {
+ struct dc_sink *sink;
+ struct dc_crtc_timing timing;
+
+ struct rect src; /* composition area */
+ struct rect dst; /* stream addressable area */
+
+ struct audio_info audio_info;
+
+ struct freesync_context freesync_ctx;
+
+ struct dc_transfer_func *out_transfer_func;
+ struct colorspace_transform gamut_remap_matrix;
+ struct csc_transform csc_color_matrix;
+
+ enum signal_type output_signal;
+
+ enum dc_color_space output_color_space;
+ enum dc_dither_option dither_option;
+
+ enum view_3d_format view_format;
+
+ bool ignore_msa_timing_param;
+ /* TODO: custom INFO packets */
+ /* TODO: ABM info (DMCU) */
+ /* TODO: PSR info */
+ /* TODO: CEA VIC */
+
+ /* from core_stream struct */
+ struct dc_context *ctx;
+
+ /* used by DCP and FMT */
+ struct bit_depth_reduction_params bit_depth_params;
+ struct clamping_and_pixel_encoding_params clamping;
+
+ int phy_pix_clk;
+ enum signal_type signal;
+ bool dpms_off;
+
+ struct dc_stream_status status;
+
+ struct dc_cursor_attributes cursor_attributes;
+
+ /* from stream struct */
+ struct kref refcount;
+};
+
+struct dc_stream_update {
+ struct rect src;
+ struct rect dst;
+ struct dc_transfer_func *out_transfer_func;
+};
+
+bool dc_is_stream_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+bool dc_is_stream_scaling_unchanged(
+ struct dc_stream_state *old_stream, struct dc_stream_state *stream);
+
+/*
+ * Set up surface attributes and associate them with a stream.
+ * The surfaces parameter is the absolute set of all surfaces active for the stream.
+ * If no surfaces are provided, the stream will be blanked; no memory is read.
+ * Any flip-related attribute change must be done through this interface.
+ *
+ * After this call:
+ * Surface attributes are programmed and configured to be composed into the stream.
+ * This does not trigger a flip. No surface address is programmed.
+ */
+
+bool dc_commit_planes_to_stream(
+ struct dc *dc,
+ struct dc_plane_state **plane_states,
+ uint8_t new_plane_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *state);
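+
+/* Illustrative sketch only: committing a single plane to a stream. The
+ * plane, stream and state objects are assumed to have been created and
+ * validated elsewhere (e.g. with dc_validate_plane()/dc_validate_stream()).
+ *
+ *	struct dc_plane_state *planes[1] = { plane_state };
+ *
+ *	if (!dc_commit_planes_to_stream(dc, planes, 1, stream, state)) {
+ *		// handle programming failure
+ *	}
+ */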
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_plane_state **plane_states,
+ struct dc_state *state);
+/*
+ * Log the current stream state.
+ */
+void dc_stream_log(
+ const struct dc_stream_state *stream,
+ struct dal_logger *dc_logger,
+ enum dc_log_type log_type);
+
+uint8_t dc_get_current_stream_count(struct dc *dc);
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
+
+/*
+ * Return the current frame counter.
+ */
+uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
+
+/* TODO: Return parsed values rather than direct register read
+ * This has a dependency on the caller (amdgpu_get_crtc_scanoutpos)
+ * being refactored properly to be dce-specific
+ */
+bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+enum dc_status dc_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
+
+enum dc_status dc_remove_stream_from_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *stream);
+
+
+bool dc_add_plane_to_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+
+bool dc_remove_plane_from_context(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+
+bool dc_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context);
+
+bool dc_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *context);
+
+/*
+ * Structure to store surface/stream associations for validation
+ */
+struct dc_validation_set {
+ struct dc_stream_state *stream;
+ struct dc_plane_state *plane_states[MAX_SURFACES];
+ uint8_t plane_count;
+};
+
+enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream);
+
+enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state);
+
+enum dc_status dc_validate_global_state(
+ struct dc *dc,
+ struct dc_state *new_ctx);
+
+/*
+ * This function takes a stream and checks if it is guaranteed to be supported.
+ * Guaranteed means that MAX_COFUNC similar streams are supported.
+ *
+ * After this call:
+ * No hardware is programmed by this call. Only validation is done.
+ */
+
+
+void dc_resource_state_construct(
+ const struct dc *dc,
+ struct dc_state *dst_ctx);
+
+void dc_resource_state_copy_construct(
+ const struct dc_state *src_ctx,
+ struct dc_state *dst_ctx);
+
+void dc_resource_state_copy_construct_current(
+ const struct dc *dc,
+ struct dc_state *dst_ctx);
+
+void dc_resource_state_destruct(struct dc_state *context);
+
+/*
+ * TODO update to make it about validation sets
+ * Set up streams and links associated to drive sinks
+ * The streams parameter is an absolute set of all active streams.
+ *
+ * After this call:
+ * Phy, Encoder, Timing Generator are programmed and enabled.
+ * New streams are enabled with blank stream; no memory read.
+ */
+bool dc_commit_state(struct dc *dc, struct dc_state *context);
+
+/*
+ * Enable stereo when commit_streams is not required,
+ * for example, frame alternate.
+ */
+bool dc_enable_stereo(
+ struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count);
+
+/**
+ * Create a new default stream for the requested sink
+ */
+struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
+
+void dc_stream_retain(struct dc_stream_state *dc_stream);
+void dc_stream_release(struct dc_stream_state *dc_stream);
+
+struct dc_stream_status *dc_stream_get_status(
+ struct dc_stream_state *dc_stream);
+
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status);
+
+
+struct dc_state *dc_create_state(void);
+void dc_retain_state(struct dc_state *context);
+void dc_release_state(struct dc_state *context);
+
+/*******************************************************************************
+ * Link Interfaces
+ ******************************************************************************/
+
+struct dpcd_caps {
+ union dpcd_rev dpcd_rev;
+ union max_lane_count max_ln_count;
+ union max_down_spread max_down_spread;
+
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ /* Dongle's downstream count. */
+ union sink_count sink_count;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-Frame Pack' conversion capability.*/
+ struct dc_dongle_caps dongle_caps;
+
+ uint32_t sink_dev_id;
+ uint32_t branch_dev_id;
+ int8_t branch_dev_name[6];
+ int8_t branch_hw_revision;
+
+ bool allow_invalid_MSA_timing_param;
+ bool panel_mode_edp;
+ bool dpcd_display_control_capable;
+};
+
+struct dc_link_status {
+ struct dpcd_caps *dpcd_caps;
+};
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct link_mst_stream_allocation {
+ /* DIG front */
+ const struct stream_encoder *stream_enc;
+ /* associate DRM payload table with DC stream encoder */
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in transport packet */
+ uint8_t slot_count;
+};
+
+/* DP MST stream allocation table */
+struct link_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+/*
+ * A link contains one or more sinks and their connected status.
+ * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
+ */
+struct dc_link {
+ struct dc_sink *remote_sinks[MAX_SINKS_PER_LINK];
+ unsigned int sink_count;
+ struct dc_sink *local_sink;
+ unsigned int link_index;
+ enum dc_connection_type type;
+ enum signal_type connector_signal;
+ enum dc_irq_source irq_source_hpd;
+ enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
+ /* caps is the same as reported_link_cap. Link training uses
+ * reported_link_cap. Will clean up. TODO
+ */
+ struct dc_link_settings reported_link_cap;
+ struct dc_link_settings verified_link_cap;
+ struct dc_link_settings cur_link_settings;
+ struct dc_lane_settings cur_lane_setting;
+ struct dc_link_settings preferred_link_setting;
+
+ uint8_t ddc_hw_inst;
+
+ uint8_t hpd_src;
+
+ uint8_t link_enc_hw_inst;
+
+ bool test_pattern_enabled;
+ union compliance_test_state compliance_test_state;
+
+ void *priv;
+
+ struct ddc_service *ddc;
+
+ bool aux_mode;
+
+ /* Private to DC core */
+
+ const struct dc *dc;
+
+ struct dc_context *ctx;
+
+ struct link_encoder *link_enc;
+ struct graphics_object_id link_id;
+ union ddi_channel_mapping ddi_channel_mapping;
+ struct connector_device_tag_info device_tag;
+ struct dpcd_caps dpcd_caps;
+ unsigned short chip_caps;
+ unsigned int dpcd_sink_count;
+ enum edp_revision edp_revision;
+ bool psr_enabled;
+
+ /* MST record stream using this link */
+ struct link_flags {
+ bool dp_keep_receiver_powered;
+ } wa_flags;
+ struct link_mst_stream_allocation_table mst_stream_alloc_table;
+
+ struct dc_link_status link_status;
+
+};
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
+
+/*
+ * Return an enumerated dc_link. dc_link order is constant and determined at
+ * boot time. They cannot be created or destroyed.
+ * Use dc_get_caps() to get the number of links.
+ */
+static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+ return dc->links[link_index];
+}
+
+/* Set backlight level of an embedded panel (eDP, LVDS). */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
+ uint32_t frame_ramp, const struct dc_stream_state *stream);
+
+bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
+
+bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state);
+
+bool dc_link_setup_psr(struct dc_link *dc_link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context);
+
+/* Request DC to detect whether a panel is connected.
+ * DETECT_REASON_BOOT indicates the call is made during initial boot.
+ * Returns false for any type of detection failure or for MST detection,
+ * true otherwise. True means further action is required (status update
+ * and OS notification).
+ */
+enum dc_detect_reason {
+ DETECT_REASON_BOOT,
+ DETECT_REASON_HPD,
+ DETECT_REASON_HPDRX,
+};
+
+bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
+
+/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
+ * Return:
+ * true - Downstream port status changed. DM should call DC to do the
+ * detection.
+ * false - no change in Downstream port status. No further action required
+ * from DM. */
+bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+
+struct dc_sink_init_data;
+
+struct dc_sink *dc_link_add_remote_sink(
+ struct dc_link *dc_link,
+ const uint8_t *edid,
+ int len,
+ struct dc_sink_init_data *init_data);
+
+void dc_link_remove_remote_sink(
+ struct dc_link *link,
+ struct dc_sink *sink);
+
+/* Used by diagnostics for virtual link at the moment */
+
+void dc_link_dp_set_drive_settings(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+enum link_training_result dc_link_dp_perform_link_training(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+void dc_link_dp_enable_hpd(const struct dc_link *link);
+
+void dc_link_dp_disable_hpd(const struct dc_link *link);
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size);
+
+/*******************************************************************************
+ * Sink Interfaces - A sink corresponds to a display output device
+ ******************************************************************************/
+
+struct dc_container_id {
+ // 128bit GUID in binary form
+ unsigned char guid[16];
+ // 8 byte port ID -> ELD.PortID
+ unsigned int portId[2];
+ // 2 byte manufacturer name -> ELD.ManufacturerName
+ unsigned short manufacturerName;
+ // 2 byte product code -> ELD.ProductCode
+ unsigned short productCode;
+};
+
+
+
+/*
+ * The sink structure contains EDID and other display device properties
+ */
+struct dc_sink {
+ enum signal_type sink_signal;
+ struct dc_edid dc_edid; /* raw edid */
+ struct dc_edid_caps edid_caps; /* parsed display caps */
+ struct dc_container_id *dc_container_id;
+ uint32_t dongle_max_pix_clk;
+ void *priv;
+ struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
+ bool converter_disable_audio;
+
+ /* private to DC core */
+ struct dc_link *link;
+ struct dc_context *ctx;
+
+ /* private to dc_sink.c */
+ struct kref refcount;
+};
+
+void dc_sink_retain(struct dc_sink *sink);
+void dc_sink_release(struct dc_sink *sink);
+
+struct dc_sink_init_data {
+ enum signal_type sink_signal;
+ struct dc_link *link;
+ uint32_t dongle_max_pix_clk;
+ bool converter_disable_audio;
+};
+
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
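+
+/* Illustrative sketch only: creating a sink on a DP link and dropping the
+ * reference when done. The link pointer and the dongle pixel clock limit
+ * (in kHz) are placeholders.
+ *
+ *	struct dc_sink_init_data init_data = {
+ *		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT,
+ *		.link = link,
+ *		.dongle_max_pix_clk = 600000,
+ *		.converter_disable_audio = false,
+ *	};
+ *	struct dc_sink *sink = dc_sink_create(&init_data);
+ *
+ *	if (sink) {
+ *		// ... use the sink ...
+ *		dc_sink_release(sink);
+ *	}
+ */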
+
+/*******************************************************************************
+ * Cursor interfaces - To manage the cursor within a stream
+ ******************************************************************************/
+/* TODO: Deprecated once we switch to dc_set_cursor_position */
+bool dc_stream_set_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes);
+
+bool dc_stream_set_cursor_position(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position);
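+
+/* Illustrative sketch only: programming a 64x64 pre-multiplied ARGB cursor
+ * and moving it. The surface address and coordinates are placeholders;
+ * struct dc_cursor_attributes/dc_cursor_position live in dc_hw_types.h.
+ *
+ *	struct dc_cursor_attributes attrs = {
+ *		.address.quad_part = cursor_gpu_address,
+ *		.pitch = 64,
+ *		.width = 64,
+ *		.height = 64,
+ *		.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
+ *	};
+ *	struct dc_cursor_position pos = { .x = 100, .y = 100, .enable = true };
+ *
+ *	if (dc_stream_set_cursor_attributes(stream, &attrs))
+ *		dc_stream_set_cursor_position(stream, &pos);
+ */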
+
+/* Newer interfaces */
+struct dc_cursor {
+ struct dc_plane_address address;
+ struct dc_cursor_attributes attributes;
+};
+
+/*******************************************************************************
+ * Interrupt interfaces
+ ******************************************************************************/
+enum dc_irq_source dc_interrupt_to_irq_source(
+ struct dc *dc,
+ uint32_t src_id,
+ uint32_t ext_id);
+void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
+enum dc_irq_source dc_get_hpd_irq_source_at_index(
+ struct dc *dc, uint32_t link_index);
+
+/*******************************************************************************
+ * Power Interfaces
+ ******************************************************************************/
+
+void dc_set_power_state(
+ struct dc *dc,
+ enum dc_acpi_cm_power_state power_state);
+void dc_resume(struct dc *dc);
+
+/*
+ * DPCD access interfaces
+ */
+
+bool dc_submit_i2c(
+ struct dc *dc,
+ uint32_t link_index,
+ struct i2c_command *cmd);
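+
+/* Illustrative sketch only: reading one EDID block over DDC with
+ * dc_submit_i2c(). struct i2c_command/i2c_payload are defined in
+ * dc_ddc_types.h; the 0x50 slave address and the offsets follow the usual
+ * DDC convention, not something this interface mandates.
+ *
+ *	uint8_t offset = 0;
+ *	uint8_t edid[128];
+ *	struct i2c_payload payloads[] = {
+ *		{ .write = true,  .address = 0x50, .length = 1,   .data = &offset },
+ *		{ .write = false, .address = 0x50, .length = 128, .data = edid },
+ *	};
+ *	struct i2c_command cmd = {
+ *		.payloads = payloads,
+ *		.number_of_payloads = 2,
+ *		.engine = I2C_COMMAND_ENGINE_DEFAULT,
+ *		.speed = 0, // use default speed
+ *	};
+ *
+ *	if (!dc_submit_i2c(dc, link_index, &cmd)) {
+ *		// handle I2C failure
+ *	}
+ */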
+
+
+#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
new file mode 100644
index 000000000000..273d80a4ebce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_BIOS_TYPES_H
+#define DC_BIOS_TYPES_H
+
+/******************************************************************************
+ * Interface file for VBIOS implementations.
+ *
+ * The default implementation is inside DC.
+ * Display Manager (which instantiates DC) has the option to supply its own
+ * (external to DC) implementation of VBIOS, which will be called by DC, using
+ * this interface.
+ * (The intended use is Diagnostics, but other uses may appear.)
+ *****************************************************************************/
+
+#include "include/bios_parser_types.h"
+
+struct dc_vbios_funcs {
+ uint8_t (*get_connectors_number)(struct dc_bios *bios);
+
+ struct graphics_object_id (*get_encoder_id)(
+ struct dc_bios *bios,
+ uint32_t i);
+ struct graphics_object_id (*get_connector_id)(
+ struct dc_bios *bios,
+ uint8_t connector_index);
+ uint32_t (*get_dst_number)(
+ struct dc_bios *bios,
+ struct graphics_object_id id);
+
+ enum bp_result (*get_src_obj)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *src_object_id);
+ enum bp_result (*get_dst_obj)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id, uint32_t index,
+ struct graphics_object_id *dest_object_id);
+
+ enum bp_result (*get_i2c_info)(
+ struct dc_bios *dcb,
+ struct graphics_object_id id,
+ struct graphics_object_i2c_info *info);
+
+ enum bp_result (*get_voltage_ddc_info)(
+ struct dc_bios *bios,
+ uint32_t index,
+ struct graphics_object_i2c_info *info);
+ enum bp_result (*get_thermal_ddc_info)(
+ struct dc_bios *bios,
+ uint32_t i2c_channel_id,
+ struct graphics_object_i2c_info *info);
+ enum bp_result (*get_hpd_info)(
+ struct dc_bios *bios,
+ struct graphics_object_id id,
+ struct graphics_object_hpd_info *info);
+ enum bp_result (*get_device_tag)(
+ struct dc_bios *bios,
+ struct graphics_object_id connector_object_id,
+ uint32_t device_tag_index,
+ struct connector_device_tag_info *info);
+ enum bp_result (*get_firmware_info)(
+ struct dc_bios *bios,
+ struct dc_firmware_info *info);
+ enum bp_result (*get_spread_spectrum_info)(
+ struct dc_bios *bios,
+ enum as_signal_type signal,
+ uint32_t index,
+ struct spread_spectrum_info *ss_info);
+ uint32_t (*get_ss_entry_number)(
+ struct dc_bios *bios,
+ enum as_signal_type signal);
+ enum bp_result (*get_embedded_panel_info)(
+ struct dc_bios *bios,
+ struct embedded_panel_info *info);
+ enum bp_result (*get_gpio_pin_info)(
+ struct dc_bios *bios,
+ uint32_t gpio_id,
+ struct gpio_pin_info *info);
+ enum bp_result (*get_encoder_cap_info)(
+ struct dc_bios *bios,
+ struct graphics_object_id object_id,
+ struct bp_encoder_cap_info *info);
+
+ bool (*is_lid_status_changed)(
+ struct dc_bios *bios);
+ bool (*is_display_config_changed)(
+ struct dc_bios *bios);
+ bool (*is_accelerated_mode)(
+ struct dc_bios *bios);
+ void (*get_bios_event_info)(
+ struct dc_bios *bios,
+ struct bios_event_info *info);
+ void (*update_requested_backlight_level)(
+ struct dc_bios *bios,
+ uint32_t backlight_8bit);
+ uint32_t (*get_requested_backlight_level)(
+ struct dc_bios *bios);
+ void (*take_backlight_control)(
+ struct dc_bios *bios,
+ bool cntl);
+
+ bool (*is_active_display)(
+ struct dc_bios *bios,
+ enum signal_type signal,
+ const struct connector_device_tag_info *device_tag);
+ enum controller_id (*get_embedded_display_controller_id)(
+ struct dc_bios *bios);
+ uint32_t (*get_embedded_display_refresh_rate)(
+ struct dc_bios *bios);
+
+ void (*set_scratch_critical_state)(
+ struct dc_bios *bios,
+ bool state);
+ bool (*is_device_id_supported)(
+ struct dc_bios *bios,
+ struct device_id id);
+
+ /* COMMANDS */
+
+ enum bp_result (*encoder_control)(
+ struct dc_bios *bios,
+ struct bp_encoder_control *cntl);
+ enum bp_result (*transmitter_control)(
+ struct dc_bios *bios,
+ struct bp_transmitter_control *cntl);
+ enum bp_result (*crt_control)(
+ struct dc_bios *bios,
+ enum engine_id engine_id,
+ bool enable,
+ uint32_t pixel_clock);
+ enum bp_result (*enable_crtc)(
+ struct dc_bios *bios,
+ enum controller_id id,
+ bool enable);
+ enum bp_result (*adjust_pixel_clock)(
+ struct dc_bios *bios,
+ struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*set_pixel_clock)(
+ struct dc_bios *bios,
+ struct bp_pixel_clock_parameters *bp_params);
+ enum bp_result (*set_dce_clock)(
+ struct dc_bios *bios,
+ struct bp_set_dce_clock_parameters *bp_params);
+ unsigned int (*get_smu_clock_info)(
+ struct dc_bios *bios);
+ enum bp_result (*enable_spread_spectrum_on_ppll)(
+ struct dc_bios *bios,
+ struct bp_spread_spectrum_parameters *bp_params,
+ bool enable);
+ enum bp_result (*program_crtc_timing)(
+ struct dc_bios *bios,
+ struct bp_hw_crtc_timing_parameters *bp_params);
+
+ enum bp_result (*crtc_source_select)(
+ struct dc_bios *bios,
+ struct bp_crtc_source_select *bp_params);
+ enum bp_result (*program_display_engine_pll)(
+ struct dc_bios *bios,
+ struct bp_pixel_clock_parameters *bp_params);
+
+ enum signal_type (*dac_load_detect)(
+ struct dc_bios *bios,
+ struct graphics_object_id encoder,
+ struct graphics_object_id connector,
+ enum signal_type display_signal);
+
+ enum bp_result (*enable_disp_power_gating)(
+ struct dc_bios *bios,
+ enum controller_id controller_id,
+ enum bp_pipe_control_action action);
+
+ void (*post_init)(struct dc_bios *bios);
+
+ void (*bios_parser_destroy)(struct dc_bios **dcb);
+};
+
+struct bios_registers {
+ uint32_t BIOS_SCRATCH_6;
+};
+
+struct dc_bios {
+ const struct dc_vbios_funcs *funcs;
+
+ uint8_t *bios;
+ uint32_t bios_size;
+
+ uint8_t *bios_local_image;
+
+ struct dc_context *ctx;
+ const struct bios_registers *regs;
+ struct integrated_info *integrated_info;
+};
+
+#endif /* DC_BIOS_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
new file mode 100644
index 000000000000..e1affeb5cc51
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_DDC_TYPES_H_
+#define DC_DDC_TYPES_H_
+
+struct i2c_payload {
+ bool write;
+ uint8_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2c_command_engine {
+ I2C_COMMAND_ENGINE_DEFAULT,
+ I2C_COMMAND_ENGINE_SW,
+ I2C_COMMAND_ENGINE_HW
+};
+
+struct i2c_command {
+ struct i2c_payload *payloads;
+ uint8_t number_of_payloads;
+
+ enum i2c_command_engine engine;
+
+ /* expressed in KHz
+ * zero means "use default value" */
+ uint32_t speed;
+};
+
+struct gpio_ddc_hw_info {
+ bool hw_supported;
+ uint32_t ddc_channel;
+};
+
+struct ddc {
+ struct gpio *pin_data;
+ struct gpio *pin_clock;
+ struct gpio_ddc_hw_info hw_info;
+ struct dc_context *ctx;
+};
+
+union ddc_wa {
+ struct {
+ uint32_t DP_SKIP_POWER_OFF:1;
+ uint32_t DP_AUX_POWER_UP_WA_DELAY:1;
+ } bits;
+ uint32_t raw;
+};
+
+struct ddc_flags {
+ uint8_t EDID_QUERY_DONE_ONCE:1;
+ uint8_t IS_INTERNAL_DISPLAY:1;
+ uint8_t FORCE_READ_REPEATED_START:1;
+ uint8_t EDID_STRESS_READ:1;
+
+};
+
+enum ddc_transaction_type {
+ DDC_TRANSACTION_TYPE_NONE = 0,
+ DDC_TRANSACTION_TYPE_I2C,
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX,
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER,
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER
+};
+
+enum display_dongle_type {
+ DISPLAY_DONGLE_NONE = 0,
+ /* Active converter types*/
+ DISPLAY_DONGLE_DP_VGA_CONVERTER,
+ DISPLAY_DONGLE_DP_DVI_CONVERTER,
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ /* DP-HDMI/DVI passive dongles (Type 1 and Type 2)*/
+ DISPLAY_DONGLE_DP_DVI_DONGLE,
+ DISPLAY_DONGLE_DP_HDMI_DONGLE,
+ /* Other types of dongle*/
+ DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE,
+};
+
+struct ddc_service {
+ struct ddc *ddc_pin;
+ struct ddc_flags flags;
+ union ddc_wa wa;
+ enum ddc_transaction_type transaction_type;
+ enum display_dongle_type dongle_type;
+ struct dc_context *ctx;
+ struct dc_link *link;
+
+ uint32_t address;
+ uint32_t edid_buf_len;
+ uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+};
+
+#endif /* DC_DDC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
new file mode 100644
index 000000000000..77e2de69cca3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -0,0 +1,467 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_DP_TYPES_H
+#define DC_DP_TYPES_H
+
+enum dc_lane_count {
+ LANE_COUNT_UNKNOWN = 0,
+ LANE_COUNT_ONE = 1,
+ LANE_COUNT_TWO = 2,
+ LANE_COUNT_FOUR = 4,
+ LANE_COUNT_EIGHT = 8,
+ LANE_COUNT_DP_MAX = LANE_COUNT_FOUR
+};
+
+/* The enum value is a multiplier of the 27MHz DP reference clock, giving the
+ * per-lane link rate. After 8b/10b encoding this corresponds to:
+ * 162MB/s of bandwidth for the 1.62GHz (RBR) rate,
+ * 270MB/s for 2.70GHz (HBR),
+ * 324MB/s for 3.24GHz (RBR2),
+ * 540MB/s for 5.40GHz (HBR2),
+ * 810MB/s for 8.10GHz (HBR3)
+ */
+enum dc_link_rate {
+ LINK_RATE_UNKNOWN = 0,
+ LINK_RATE_LOW = 0x06,
+ LINK_RATE_HIGH = 0x0A,
+ LINK_RATE_RBR2 = 0x0C,
+ LINK_RATE_HIGH2 = 0x14,
+ LINK_RATE_HIGH3 = 0x1E
+};
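+
+/* Worked example of the encoding (informative only): LINK_RATE_HIGH2 = 0x14
+ * = 20, so the per-lane rate is 20 * 0.27 = 5.40 Gbps. A 4-lane HBR2 link
+ * therefore carries 4 * 5.40 = 21.6 Gbps raw, or about 2160 MB/s of payload
+ * after 8b/10b encoding.
+ */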
+
+enum dc_link_spread {
+ LINK_SPREAD_DISABLED = 0x00,
+ /* 0.5 % downspread 30 kHz */
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ = 0x10,
+ /* 0.5 % downspread 33 kHz */
+ LINK_SPREAD_05_DOWNSPREAD_33KHZ = 0x11
+};
+
+enum dc_voltage_swing {
+ VOLTAGE_SWING_LEVEL0 = 0, /* direct HW translation! */
+ VOLTAGE_SWING_LEVEL1,
+ VOLTAGE_SWING_LEVEL2,
+ VOLTAGE_SWING_LEVEL3,
+ VOLTAGE_SWING_MAX_LEVEL = VOLTAGE_SWING_LEVEL3
+};
+
+enum dc_pre_emphasis {
+ PRE_EMPHASIS_DISABLED = 0, /* direct HW translation! */
+ PRE_EMPHASIS_LEVEL1,
+ PRE_EMPHASIS_LEVEL2,
+ PRE_EMPHASIS_LEVEL3,
+ PRE_EMPHASIS_MAX_LEVEL = PRE_EMPHASIS_LEVEL3
+};
+/* Post Cursor 2 is optional for transmitter
+ * and it applies only to the main link operating at HBR2
+ */
+enum dc_post_cursor2 {
+ POST_CURSOR2_DISABLED = 0, /* direct HW translation! */
+ POST_CURSOR2_LEVEL1,
+ POST_CURSOR2_LEVEL2,
+ POST_CURSOR2_LEVEL3,
+ POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
+};
+
+struct dc_link_settings {
+ enum dc_lane_count lane_count;
+ enum dc_link_rate link_rate;
+ enum dc_link_spread link_spread;
+};
+
+struct dc_lane_settings {
+ enum dc_voltage_swing VOLTAGE_SWING;
+ enum dc_pre_emphasis PRE_EMPHASIS;
+ enum dc_post_cursor2 POST_CURSOR2;
+};
+
+struct dc_link_training_settings {
+ struct dc_link_settings link;
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+};
+
+
+union dpcd_rev {
+ struct {
+ uint8_t MINOR:4;
+ uint8_t MAJOR:4;
+ } bits;
+ uint8_t raw;
+};
+
+union max_lane_count {
+ struct {
+ uint8_t MAX_LANE_COUNT:5;
+ uint8_t POST_LT_ADJ_REQ_SUPPORTED:1;
+ uint8_t TPS3_SUPPORTED:1;
+ uint8_t ENHANCED_FRAME_CAP:1;
+ } bits;
+ uint8_t raw;
+};
+
+union max_down_spread {
+ struct {
+ uint8_t MAX_DOWN_SPREAD:1;
+ uint8_t RESERVED:5;
+ uint8_t NO_AUX_HANDSHAKE_LINK_TRAINING:1;
+ uint8_t TPS4_SUPPORTED:1;
+ } bits;
+ uint8_t raw;
+};
+
+union mstm_cap {
+ struct {
+ uint8_t MST_CAP:1;
+ uint8_t RESERVED:7;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_count_set {
+ struct {
+ uint8_t LANE_COUNT_SET:5;
+ uint8_t POST_LT_ADJ_REQ_GRANTED:1;
+ uint8_t RESERVED:1;
+ uint8_t ENHANCED_FRAMING:1;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_status {
+ struct {
+ uint8_t CR_DONE_0:1;
+ uint8_t CHANNEL_EQ_DONE_0:1;
+ uint8_t SYMBOL_LOCKED_0:1;
+ uint8_t RESERVED0:1;
+ uint8_t CR_DONE_1:1;
+ uint8_t CHANNEL_EQ_DONE_1:1;
+ uint8_t SYMBOL_LOCKED_1:1;
+ uint8_t RESERVED_1:1;
+ } bits;
+ uint8_t raw;
+};
+
+union device_service_irq {
+ struct {
+ uint8_t REMOTE_CONTROL_CMD_PENDING:1;
+ uint8_t AUTOMATED_TEST:1;
+ uint8_t CP_IRQ:1;
+ uint8_t MCCS_IRQ:1;
+ uint8_t DOWN_REP_MSG_RDY:1;
+ uint8_t UP_REQ_MSG_RDY:1;
+ uint8_t SINK_SPECIFIC:1;
+ uint8_t reserved:1;
+ } bits;
+ uint8_t raw;
+};
+
+union sink_count {
+ struct {
+ uint8_t SINK_COUNT:6;
+ uint8_t CPREADY:1;
+ uint8_t RESERVED:1;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_align_status_updated {
+ struct {
+ uint8_t INTERLANE_ALIGN_DONE:1;
+ uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1;
+ uint8_t RESERVED:4;
+ uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1;
+ uint8_t LINK_STATUS_UPDATED:1;
+ } bits;
+ uint8_t raw;
+};
+
+union lane_adjust {
+ struct {
+ uint8_t VOLTAGE_SWING_LANE:2;
+ uint8_t PRE_EMPHASIS_LANE:2;
+ uint8_t RESERVED:4;
+ } bits;
+ uint8_t raw;
+};
+
+union dpcd_training_pattern {
+ struct {
+ uint8_t TRAINING_PATTERN_SET:4;
+ uint8_t RECOVERED_CLOCK_OUT_EN:1;
+ uint8_t SCRAMBLING_DISABLE:1;
+ uint8_t SYMBOL_ERROR_COUNT_SEL:2;
+ } v1_4;
+ struct {
+ uint8_t TRAINING_PATTERN_SET:2;
+ uint8_t LINK_QUAL_PATTERN_SET:2;
+ uint8_t RESERVED:4;
+ } v1_3;
+ uint8_t raw;
+};
+
+/* Training Lane is used to configure downstream DP device's voltage swing
+and pre-emphasis levels*/
+/* The DPCD addresses are from 0x103 to 0x106*/
+union dpcd_training_lane {
+ struct {
+ uint8_t VOLTAGE_SWING_SET:2;
+ uint8_t MAX_SWING_REACHED:1;
+ uint8_t PRE_EMPHASIS_SET:2;
+ uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+ uint8_t RESERVED:2;
+ } bits;
+ uint8_t raw;
+};
+
+/* TMDS-converter related */
+union dwnstream_port_caps_byte0 {
+ struct {
+ uint8_t DWN_STRM_PORTX_TYPE:3;
+ uint8_t DWN_STRM_PORTX_HPD:1;
+ uint8_t RESERVERD:4;
+ } bits;
+ uint8_t raw;
+};
+
+/* these are the detailed types stored at DWN_STRM_PORTX_CAP (00080h)*/
+enum dpcd_downstream_port_detailed_type {
+ DOWN_STREAM_DETAILED_DP = 0,
+ DOWN_STREAM_DETAILED_VGA,
+ DOWN_STREAM_DETAILED_DVI,
+ DOWN_STREAM_DETAILED_HDMI,
+ DOWN_STREAM_DETAILED_NONDDC,/* has no EDID (TV,CV)*/
+ DOWN_STREAM_DETAILED_DP_PLUS_PLUS
+};
+
+union dwnstream_port_caps_byte1 {
+ struct {
+ uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_downstream_port_present {
+ uint8_t byte;
+ struct {
+ uint8_t PORT_PRESENT:1;
+ uint8_t PORT_TYPE:2;
+ uint8_t FMT_CONVERSION:1;
+ uint8_t DETAILED_CAPS:1;
+ uint8_t RESERVED:3;
+ } fields;
+};
+
+union dwnstream_port_caps_byte3_dvi {
+ struct {
+ uint8_t RESERVED1:1;
+ uint8_t DUAL_LINK:1;
+ uint8_t HIGH_COLOR_DEPTH:1;
+ uint8_t RESERVED2:5;
+ } bits;
+ uint8_t raw;
+};
+
+union dwnstream_port_caps_byte3_hdmi {
+ struct {
+ uint8_t FRAME_SEQ_TO_FRAME_PACK:1;
+ uint8_t YCrCr422_PASS_THROUGH:1;
+ uint8_t YCrCr420_PASS_THROUGH:1;
+ uint8_t YCrCr422_CONVERSION:1;
+ uint8_t YCrCr420_CONVERSION:1;
+ uint8_t RESERVED:3;
+ } bits;
+ uint8_t raw;
+};
+
+/*4-byte structure for detailed capabilities of a down-stream port
+(DP-to-TMDS converter).*/
+
+union sink_status {
+ struct {
+ uint8_t RX_PORT0_STATUS:1;
+ uint8_t RX_PORT1_STATUS:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+/*6-byte structure corresponding to 6 registers (200h-205h)
+read during handling of HPD-IRQ*/
+union hpd_irq_data {
+ struct {
+ union sink_count sink_cnt;/* 200h */
+ union device_service_irq device_service_irq;/* 201h */
+ union lane_status lane01_status;/* 202h */
+ union lane_status lane23_status;/* 203h */
+ union lane_align_status_updated lane_status_updated;/* 204h */
+ union sink_status sink_status;
+ } bytes;
+ uint8_t raw[6];
+};
+
+union down_stream_port_count {
+ struct {
+ uint8_t DOWN_STR_PORT_COUNT:4;
+ uint8_t RESERVED:2; /*Bits 5:4 = RESERVED. Read all 0s.*/
+ /*Bit 6 = MSA_TIMING_PAR_IGNORED
+ 0 = Sink device requires the MSA timing parameters
+ 1 = Sink device is capable of rendering incoming video
+ stream without MSA timing parameters*/
+ uint8_t IGNORE_MSA_TIMING_PARAM:1;
+ /*Bit 7 = OUI Support
+ 0 = OUI not supported
+ 1 = OUI supported
+ (OUI and Device Identification mandatory for DP 1.2)*/
+ uint8_t OUI_SUPPORT:1;
+ } bits;
+ uint8_t raw;
+};
+
+union down_spread_ctrl {
+ struct {
+ uint8_t RESERVED1:4;/* Bit 3:0 = RESERVED. Read all 0s*/
+ /* Bit 4 = SPREAD_AMP. Spreading amplitude
+ 0 = Main link signal is not downspread
+ 1 = Main link signal is downspread <= 0.5%
+ with frequency in the range of 30kHz ~ 33kHz*/
+ uint8_t SPREAD_AMP:1;
+ uint8_t RESERVED2:2;/*Bit 6:5 = RESERVED. Read all 0s*/
+ /*Bit 7 = MSA_TIMING_PAR_IGNORE_EN
+ 0 = Source device will send valid data for the MSA Timing Params
+ 1 = Source device may send invalid data for these MSA Timing Params*/
+ uint8_t IGNORE_MSA_TIMING_PARAM:1;
+ } bits;
+ uint8_t raw;
+};
+
+union dpcd_edp_config {
+ struct {
+ uint8_t PANEL_MODE_EDP:1;
+ uint8_t FRAMING_CHANGE_ENABLE:1;
+ uint8_t RESERVED:5;
+ uint8_t PANEL_SELF_TEST_ENABLE:1;
+ } bits;
+ uint8_t raw;
+};
+
+struct dp_device_vendor_id {
+ uint8_t ieee_oui[3];/*24-bit IEEE OUI*/
+ uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/
+};
+
+struct dp_sink_hw_fw_revision {
+ uint8_t ieee_hw_rev;
+ uint8_t ieee_fw_rev[2];
+};
+
+/* DPCD register of DP receiver capability field bits */
+union edp_configuration_cap {
+ struct {
+ uint8_t ALT_SCRAMBLER_RESET:1;
+ uint8_t FRAMING_CHANGE:1;
+ uint8_t RESERVED:1;
+ uint8_t DPCD_DISPLAY_CONTROL_CAPABLE:1;
+ uint8_t RESERVED2:4;
+ } bits;
+ uint8_t raw;
+};
+
+union training_aux_rd_interval {
+ struct {
+ uint8_t TRAINIG_AUX_RD_INTERVAL:7;
+ uint8_t EXT_RECIEVER_CAP_FIELD_PRESENT:1;
+ } bits;
+ uint8_t raw;
+};
+
+/* Automated test structures */
+union test_request {
+ struct {
+ uint8_t LINK_TRAINING :1;
+ uint8_t LINK_TEST_PATTRN :1;
+ uint8_t EDID_REAT :1;
+ uint8_t PHY_TEST_PATTERN :1;
+ uint8_t AUDIO_TEST_PATTERN :1;
+ uint8_t RESERVED :1;
+ uint8_t TEST_STEREO_3D :1;
+ } bits;
+ uint8_t raw;
+};
+
+union test_response {
+ struct {
+ uint8_t ACK :1;
+ uint8_t NO_ACK :1;
+ uint8_t RESERVED :6;
+ } bits;
+ uint8_t raw;
+};
+
+union phy_test_pattern {
+ struct {
+ /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1
+ * and 3 bits for DP1.2.
+ */
+ uint8_t PATTERN :3;
+ /* By spec, bits 7:2 are 0 for DP1.1. */
+ uint8_t RESERVED :5;
+ } bits;
+ uint8_t raw;
+};
+
+/* States of Compliance Test Specification (CTS DP1.2). */
+union compliance_test_state {
+ struct {
+ unsigned char STEREO_3D_RUNNING : 1;
+ unsigned char RESERVED : 7;
+ } bits;
+ unsigned char raw;
+};
+
+union link_test_pattern {
+ struct {
+ /* dpcd_link_test_patterns */
+ unsigned char PATTERN :2;
+ unsigned char RESERVED:6;
+ } bits;
+ unsigned char raw;
+};
+
+union test_misc {
+ struct dpcd_test_misc_bits {
+ unsigned char SYNC_CLOCK :1;
+ /* dpcd_test_color_format */
+ unsigned char CLR_FORMAT :2;
+ /* dpcd_test_dyn_range */
+ unsigned char DYN_RANGE :1;
+ unsigned char YCBCR :1;
+ /* dpcd_test_bit_depth */
+ unsigned char BPC :3;
+ } bits;
+ unsigned char raw;
+};
+
+#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
new file mode 100644
index 000000000000..0d84b2a1ccfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -0,0 +1,171 @@
+/*
+ * dc_helper.c
+ *
+ * Created on: Aug 30, 2016
+ * Author: agrodzov
+ */
+#include "dm_services.h"
+#include <stdarg.h>
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ uint32_t shift, mask, field_value;
+ int i = 1;
+
+ va_list ap;
+ va_start(ap, field_value1);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t);
+
+ reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift);
+ i++;
+ }
+
+ dm_write_reg(ctx, addr, reg_val);
+ va_end(ap);
+
+ return reg_val;
+}
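+
+/* Illustrative sketch only: a single read-modify-write updating two fields
+ * of one register. The register and field names are placeholders; in the
+ * driver this function is normally reached through the REG_UPDATE()-style
+ * helper macros rather than called directly.
+ *
+ *	uint32_t val = dm_read_reg(ctx, mmSOME_REG);
+ *
+ *	generic_reg_update_ex(ctx, mmSOME_REG, val, 2,
+ *			FIELD_A__SHIFT, FIELD_A_MASK, 1,
+ *			FIELD_B__SHIFT, FIELD_B_MASK, 0);
+ */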
+
+uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift, uint32_t mask, uint32_t *field_value)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value = get_reg_field_value_ex(reg_val, mask, shift);
+ return reg_val;
+}
+
+uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ return reg_val;
+}
+
+uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ return reg_val;
+}
+
+uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+ return reg_val;
+}
+
+uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5)
+{
+ uint32_t reg_val = dm_read_reg(ctx, addr);
+ *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+ *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+ *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+ *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+ *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+ return reg_val;
+}
+
+/* Note: a va_arg version of this is a bad idea: the output parameters are
+ * passed by pointer, so the compiler cannot check for size mismatches and the
+ * code is prone to stack-corruption bugs.
+
+uint32_t generic_reg_get(const struct dc_context *ctx,
+ uint32_t addr, int n, ...)
+{
+ uint32_t shift, mask;
+ uint32_t *field_value;
+ uint32_t reg_val;
+ int i = 0;
+
+ reg_val = dm_read_reg(ctx, addr);
+
+ va_list ap;
+ va_start(ap, n);
+
+ while (i < n) {
+ shift = va_arg(ap, uint32_t);
+ mask = va_arg(ap, uint32_t);
+ field_value = va_arg(ap, uint32_t *);
+
+ *field_value = get_reg_field_value_ex(reg_val, mask, shift);
+ i++;
+ }
+
+ va_end(ap);
+
+ return reg_val;
+}
+*/
+
+uint32_t generic_reg_wait(const struct dc_context *ctx,
+ uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
+ unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
+ const char *func_name, int line)
+{
+ uint32_t field_value;
+ uint32_t reg_val;
+ int i;
+
+ /* something is terribly wrong if time out is > 200ms. (5Hz) */
+ ASSERT(delay_between_poll_us * time_out_num_tries <= 200000);
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ /* 35 seconds */
+ delay_between_poll_us = 35000;
+ time_out_num_tries = 1000;
+ }
+
+ for (i = 0; i <= time_out_num_tries; i++) {
+ if (i) {
+ if (delay_between_poll_us >= 1000)
+ msleep(delay_between_poll_us/1000);
+ else if (delay_between_poll_us > 0)
+ udelay(delay_between_poll_us);
+ }
+
+ reg_val = dm_read_reg(ctx, addr);
+
+ field_value = get_reg_field_value_ex(reg_val, mask, shift);
+
+ if (field_value == condition_value)
+ return reg_val;
+ }
+
+ dm_error("REG_WAIT timeout %dus * %d tries - %s line:%d\n",
+ delay_between_poll_us, time_out_num_tries,
+ func_name, line);
+
+ if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ BREAK_TO_DEBUGGER();
+
+ return reg_val;
+}
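+
+/* Illustrative sketch only: polling a (placeholder) status field until it
+ * reads 1, checking every 10 us for at most 1000 tries. Callers typically
+ * use a REG_WAIT()-style macro so __func__ and __LINE__ are filled in
+ * automatically.
+ *
+ *	generic_reg_wait(ctx, mmSOME_STATUS_REG,
+ *			STATUS_DONE__SHIFT, STATUS_DONE_MASK, 1,
+ *			10, 1000, __func__, __LINE__);
+ */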
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
new file mode 100644
index 000000000000..1a9f57fb0838
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -0,0 +1,706 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_HW_TYPES_H
+#define DC_HW_TYPES_H
+
+#include "os_types.h"
+#include "fixed31_32.h"
+#include "signal_types.h"
+
+/******************************************************************************
+ * Data types for Virtual HW Layer of DAL3.
+ * (see DAL3 design documents for HW Layer definition)
+ *
+ * The intended uses are:
+ * 1. Generation of pseudocode sequences for HW programming.
+ * 2. Implementation of real HW programming by HW Sequencer of DAL3.
+ *
+ * Note: do *not* add any types which are *not* used for HW programming - this
+ * will ensure separation of Logic layer from HW layer.
+ ******************************************************************************/
+
+union large_integer {
+ struct {
+ uint32_t low_part;
+ int32_t high_part;
+ };
+
+ struct {
+ uint32_t low_part;
+ int32_t high_part;
+ } u;
+
+ int64_t quad_part;
+};
+
+#define PHYSICAL_ADDRESS_LOC union large_integer
+
+enum dc_plane_addr_type {
+ PLN_ADDR_TYPE_GRAPHICS = 0,
+ PLN_ADDR_TYPE_GRPH_STEREO,
+ PLN_ADDR_TYPE_VIDEO_PROGRESSIVE,
+};
+
+struct dc_plane_address {
+ enum dc_plane_addr_type type;
+ bool tmz_surface;
+ union {
+ struct{
+ PHYSICAL_ADDRESS_LOC addr;
+ PHYSICAL_ADDRESS_LOC meta_addr;
+ union large_integer dcc_const_color;
+ } grph;
+
+ /*stereo*/
+ struct {
+ PHYSICAL_ADDRESS_LOC left_addr;
+ PHYSICAL_ADDRESS_LOC left_meta_addr;
+ union large_integer left_dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC right_addr;
+ PHYSICAL_ADDRESS_LOC right_meta_addr;
+ union large_integer right_dcc_const_color;
+
+ } grph_stereo;
+
+ /*video progressive*/
+ struct {
+ PHYSICAL_ADDRESS_LOC luma_addr;
+ PHYSICAL_ADDRESS_LOC luma_meta_addr;
+ union large_integer luma_dcc_const_color;
+
+ PHYSICAL_ADDRESS_LOC chroma_addr;
+ PHYSICAL_ADDRESS_LOC chroma_meta_addr;
+ union large_integer chroma_dcc_const_color;
+ } video_progressive;
+ };
+};
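+
+/* Illustrative sketch only: describing a plain (non-stereo, non-video)
+ * surface address. The 64-bit framebuffer address is a placeholder.
+ *
+ *	struct dc_plane_address addr = {
+ *		.type = PLN_ADDR_TYPE_GRAPHICS,
+ *		.tmz_surface = false,
+ *	};
+ *	addr.grph.addr.quad_part = fb_gpu_address;
+ */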
+
+struct dc_size {
+ int width;
+ int height;
+};
+
+struct rect {
+ int x;
+ int y;
+ int width;
+ int height;
+};
+
+union plane_size {
+ /* Grph or Video will be selected
+ * based on format above:
+ * Use Video structure if
+ * format >= DalPixelFormat_VideoBegin
+ * else use Grph structure
+ */
+ struct {
+ struct rect surface_size;
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch
+ * is 32 pixel aligned.
+ */
+ int surface_pitch;
+ } grph;
+
+ struct {
+ struct rect luma_size;
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch is
+ * 32 pixel aligned.
+ */
+ int luma_pitch;
+
+ struct rect chroma_size;
+ /* Graphic surface pitch in pixels.
+ * In LINEAR_GENERAL mode, pitch is
+ * 32 pixel aligned.
+ */
+ int chroma_pitch;
+ } video;
+};
+
+struct dc_plane_dcc_param {
+ bool enable;
+
+ union {
+ struct {
+ int meta_pitch;
+ bool independent_64b_blks;
+ } grph;
+
+ struct {
+ int meta_pitch_l;
+ bool independent_64b_blks_l;
+
+ int meta_pitch_c;
+ bool independent_64b_blks_c;
+ } video;
+ };
+};
+
+/*Displayable pixel format in fb*/
+enum surface_pixel_format {
+ SURFACE_PIXEL_FORMAT_GRPH_BEGIN = 0,
+ /*TO BE REMOVED: palette, 256 colors*/
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS =
+ SURFACE_PIXEL_FORMAT_GRPH_BEGIN,
+ /*16 bpp*/
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB1555,
+ /*16 bpp*/
+ SURFACE_PIXEL_FORMAT_GRPH_RGB565,
+ /*32 bpp*/
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB8888,
+ /*32 bpp swapped*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR8888,
+
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010,
+ /*swapped*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010,
+ /*TO BE REMOVED: swapped; XR_BIAS has no difference
+ * in pixel layout from the previous format and we can
+ * delete this after discussion*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS,
+ /*64 bpp */
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616,
+ /*float*/
+ SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F,
+ /*swapped & float*/
+ SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
+ /*grow graphics here if necessary */
+
+ SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
+ SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr,
+ SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb,
+ SURFACE_PIXEL_FORMAT_INVALID
+
+ /*grow 444 video here if necessary */
+};
+
+
+
+/* Pixel format */
+enum pixel_format {
+ /*graph*/
+ PIXEL_FORMAT_UNINITIALIZED,
+ PIXEL_FORMAT_INDEX8,
+ PIXEL_FORMAT_RGB565,
+ PIXEL_FORMAT_ARGB8888,
+ PIXEL_FORMAT_ARGB2101010,
+ PIXEL_FORMAT_ARGB2101010_XRBIAS,
+ PIXEL_FORMAT_FP16,
+ /*video*/
+ PIXEL_FORMAT_420BPP8,
+ PIXEL_FORMAT_420BPP10,
+ /*end of pixel format definition*/
+ PIXEL_FORMAT_INVALID,
+
+ PIXEL_FORMAT_GRPH_BEGIN = PIXEL_FORMAT_INDEX8,
+ PIXEL_FORMAT_GRPH_END = PIXEL_FORMAT_FP16,
+ PIXEL_FORMAT_VIDEO_BEGIN = PIXEL_FORMAT_420BPP8,
+ PIXEL_FORMAT_VIDEO_END = PIXEL_FORMAT_420BPP10,
+ PIXEL_FORMAT_UNKNOWN
+};
+
+enum tile_split_values {
+ DC_DISPLAY_MICRO_TILING = 0x0,
+ DC_THIN_MICRO_TILING = 0x1,
+ DC_DEPTH_MICRO_TILING = 0x2,
+ DC_ROTATED_MICRO_TILING = 0x3,
+};
+
+/* TODO: These values come from hardware spec. We need to readdress this
+ * if they ever change.
+ */
+enum array_mode_values {
+ DC_ARRAY_LINEAR_GENERAL = 0,
+ DC_ARRAY_LINEAR_ALLIGNED,
+ DC_ARRAY_1D_TILED_THIN1,
+ DC_ARRAY_1D_TILED_THICK,
+ DC_ARRAY_2D_TILED_THIN1,
+ DC_ARRAY_PRT_TILED_THIN1,
+ DC_ARRAY_PRT_2D_TILED_THIN1,
+ DC_ARRAY_2D_TILED_THICK,
+ DC_ARRAY_2D_TILED_X_THICK,
+ DC_ARRAY_PRT_TILED_THICK,
+ DC_ARRAY_PRT_2D_TILED_THICK,
+ DC_ARRAY_PRT_3D_TILED_THIN1,
+ DC_ARRAY_3D_TILED_THIN1,
+ DC_ARRAY_3D_TILED_THICK,
+ DC_ARRAY_3D_TILED_X_THICK,
+ DC_ARRAY_PRT_3D_TILED_THICK,
+};
+
+enum tile_mode_values {
+ DC_ADDR_SURF_MICRO_TILING_DISPLAY = 0x0,
+ DC_ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1,
+};
+
+enum swizzle_mode_values {
+ DC_SW_LINEAR = 0,
+ DC_SW_256B_S = 1,
+ DC_SW_256_D = 2,
+ DC_SW_256_R = 3,
+ DC_SW_4KB_S = 5,
+ DC_SW_4KB_D = 6,
+ DC_SW_4KB_R = 7,
+ DC_SW_64KB_S = 9,
+ DC_SW_64KB_D = 10,
+ DC_SW_64KB_R = 11,
+ DC_SW_VAR_S = 13,
+ DC_SW_VAR_D = 14,
+ DC_SW_VAR_R = 15,
+ DC_SW_64KB_S_T = 17,
+ DC_SW_64KB_D_T = 18,
+ DC_SW_4KB_S_X = 21,
+ DC_SW_4KB_D_X = 22,
+ DC_SW_4KB_R_X = 23,
+ DC_SW_64KB_S_X = 25,
+ DC_SW_64KB_D_X = 26,
+ DC_SW_64KB_R_X = 27,
+ DC_SW_VAR_S_X = 29,
+ DC_SW_VAR_D_X = 30,
+ DC_SW_VAR_R_X = 31,
+ DC_SW_MAX
+};
+
+union dc_tiling_info {
+
+ struct {
+ /* Specifies the number of memory banks for tiling
+ * purposes.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 2,4,8,16
+ */
+ unsigned int num_banks;
+ /* Specifies the number of tiles in the x direction
+ * to be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_width;
+ unsigned int bank_width_c;
+ /* Specifies the number of tiles in the y direction to
+ * be incorporated into the same bank.
+ * Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES: 1,2,4,8
+ */
+ unsigned int bank_height;
+ unsigned int bank_height_c;
+ /* Specifies the macro tile aspect ratio. Only applies
+ * to 2D and 3D tiling modes.
+ */
+ unsigned int tile_aspect;
+ unsigned int tile_aspect_c;
+ /* Specifies the number of bytes that will be stored
+ * contiguously for each tile.
+ * If the tile data requires more storage than this
+ * amount, it is split into multiple slices.
+ * This field must not be larger than
+ * GB_ADDR_CONFIG.DRAM_ROW_SIZE.
+ * Only applies to 2D and 3D tiling modes.
+ * For color render targets, TILE_SPLIT >= 256B.
+ */
+ enum tile_split_values tile_split;
+ enum tile_split_values tile_split_c;
+ /* Specifies the addressing within a tile.
+ * 0x0 - DISPLAY_MICRO_TILING
+ * 0x1 - THIN_MICRO_TILING
+ * 0x2 - DEPTH_MICRO_TILING
+ * 0x3 - ROTATED_MICRO_TILING
+ */
+ enum tile_mode_values tile_mode;
+ enum tile_mode_values tile_mode_c;
+ /* Specifies the number of pipes and how they are
+ * interleaved in the surface.
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ unsigned int pipe_config;
+ /* Specifies the tiling mode of the surface.
+ * THIN tiles use an 8x8x1 tile size.
+ * THICK tiles use an 8x8x4 tile size.
+ * 2D tiling modes rotate banks for successive Z slices
+ * 3D tiling modes rotate pipes and banks for Z slices
+ * Refer to memory addressing document for complete
+ * details and constraints.
+ */
+ enum array_mode_values array_mode;
+ } gfx8;
+
+ struct {
+ unsigned int num_pipes;
+ unsigned int num_banks;
+ unsigned int pipe_interleave;
+ unsigned int num_shader_engines;
+ unsigned int num_rb_per_se;
+ unsigned int max_compressed_frags;
+ bool shaderEnable;
+
+ enum swizzle_mode_values swizzle;
+ bool meta_linear;
+ bool rb_aligned;
+ bool pipe_aligned;
+ } gfx9;
+};
+
+/* Rotation angle */
+enum dc_rotation_angle {
+ ROTATION_ANGLE_0 = 0,
+ ROTATION_ANGLE_90,
+ ROTATION_ANGLE_180,
+ ROTATION_ANGLE_270,
+ ROTATION_ANGLE_COUNT
+};
+
+enum dc_scan_direction {
+ SCAN_DIRECTION_UNKNOWN = 0,
+ SCAN_DIRECTION_HORIZONTAL = 1, /* 0, 180 rotation */
+ SCAN_DIRECTION_VERTICAL = 2, /* 90, 270 rotation */
+};
+
+struct dc_cursor_position {
+ uint32_t x;
+ uint32_t y;
+
+ uint32_t x_hotspot;
+ uint32_t y_hotspot;
+
+ /*
+ * This parameter indicates whether HW cursor should be enabled
+ */
+ bool enable;
+
+};
+
+struct dc_cursor_mi_param {
+ unsigned int pixel_clk_khz;
+ unsigned int ref_clk_khz;
+ unsigned int viewport_x_start;
+ unsigned int viewport_width;
+ struct fixed31_32 h_scale_ratio;
+};
+
+/* IPP related types */
+
+enum {
+ GAMMA_RGB_256_ENTRIES = 256,
+ GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
+ GAMMA_MAX_ENTRIES = 1024
+};
+
+enum dc_gamma_type {
+ GAMMA_RGB_256 = 1,
+ GAMMA_RGB_FLOAT_1024 = 2
+};
+
+struct dc_gamma {
+ struct kref refcount;
+ enum dc_gamma_type type;
+ unsigned int num_entries;
+
+ struct dc_gamma_entries {
+ struct fixed31_32 red[GAMMA_MAX_ENTRIES];
+ struct fixed31_32 green[GAMMA_MAX_ENTRIES];
+ struct fixed31_32 blue[GAMMA_MAX_ENTRIES];
+ } entries;
+
+ /* private to DC core */
+ struct dc_context *ctx;
+};
+
+/* Used by both ipp amd opp functions*/
+/* TODO: to be consolidated with enum color_space */
+
+/*
+ * This enum is for programming CURSOR_MODE register field. What this register
+ * should be programmed to depends on OS requested cursor shape flags and what
+ * we stored in the cursor surface.
+ */
+enum dc_cursor_color_format {
+ CURSOR_MODE_MONO,
+ CURSOR_MODE_COLOR_1BIT_AND,
+ CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA,
+ CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA
+};
+
+/*
+ * This is all the parameters required by DAL in order to update the cursor
+ * attributes, including the new cursor image surface address, size, hotspot
+ * location, color format, etc.
+ */
+
+union dc_cursor_attribute_flags {
+ struct {
+ uint32_t ENABLE_MAGNIFICATION:1;
+ uint32_t INVERSE_TRANSPARENT_CLAMPING:1;
+ uint32_t HORIZONTAL_MIRROR:1;
+ uint32_t VERTICAL_MIRROR:1;
+ uint32_t INVERT_PIXEL_DATA:1;
+ uint32_t ZERO_EXPANSION:1;
+ uint32_t MIN_MAX_INVERT:1;
+ uint32_t RESERVED:25;
+ } bits;
+ uint32_t value;
+};
+
+struct dc_cursor_attributes {
+ PHYSICAL_ADDRESS_LOC address;
+ uint32_t pitch;
+
+ /* Width and height should correspond to cursor surface width x height */
+ uint32_t width;
+ uint32_t height;
+
+ enum dc_cursor_color_format color_format;
+
+ /* In case we support HW Cursor rotation in the future */
+ enum dc_rotation_angle rotation_angle;
+
+ union dc_cursor_attribute_flags attribute_flags;
+};
+
+/* OPP */
+
+enum dc_color_space {
+ COLOR_SPACE_UNKNOWN,
+ COLOR_SPACE_SRGB,
+ COLOR_SPACE_SRGB_LIMITED,
+ COLOR_SPACE_YCBCR601,
+ COLOR_SPACE_YCBCR709,
+ COLOR_SPACE_YCBCR601_LIMITED,
+ COLOR_SPACE_YCBCR709_LIMITED,
+ COLOR_SPACE_2020_RGB_FULLRANGE,
+ COLOR_SPACE_2020_RGB_LIMITEDRANGE,
+ COLOR_SPACE_2020_YCBCR,
+ COLOR_SPACE_ADOBERGB,
+};
+
+enum dc_dither_option {
+ DITHER_OPTION_DEFAULT,
+ DITHER_OPTION_DISABLE,
+ DITHER_OPTION_FM6,
+ DITHER_OPTION_FM8,
+ DITHER_OPTION_FM10,
+ DITHER_OPTION_SPATIAL6_FRAME_RANDOM,
+ DITHER_OPTION_SPATIAL8_FRAME_RANDOM,
+ DITHER_OPTION_SPATIAL10_FRAME_RANDOM,
+ DITHER_OPTION_SPATIAL6,
+ DITHER_OPTION_SPATIAL8,
+ DITHER_OPTION_SPATIAL10,
+ DITHER_OPTION_TRUN6,
+ DITHER_OPTION_TRUN8,
+ DITHER_OPTION_TRUN10,
+ DITHER_OPTION_TRUN10_SPATIAL8,
+ DITHER_OPTION_TRUN10_SPATIAL6,
+ DITHER_OPTION_TRUN10_FM8,
+ DITHER_OPTION_TRUN10_FM6,
+ DITHER_OPTION_TRUN10_SPATIAL8_FM6,
+ DITHER_OPTION_SPATIAL10_FM8,
+ DITHER_OPTION_SPATIAL10_FM6,
+ DITHER_OPTION_TRUN8_SPATIAL6,
+ DITHER_OPTION_TRUN8_FM6,
+ DITHER_OPTION_SPATIAL8_FM6,
+ DITHER_OPTION_MAX = DITHER_OPTION_SPATIAL8_FM6,
+ DITHER_OPTION_INVALID
+};
+
+enum dc_quantization_range {
+ QUANTIZATION_RANGE_UNKNOWN,
+ QUANTIZATION_RANGE_FULL,
+ QUANTIZATION_RANGE_LIMITED
+};
+
+/* XFM */
+
+/* used in struct dc_plane_state */
+struct scaling_taps {
+ uint32_t v_taps;
+ uint32_t h_taps;
+ uint32_t v_taps_c;
+ uint32_t h_taps_c;
+};
+
+enum dc_timing_standard {
+ TIMING_STANDARD_UNDEFINED,
+ TIMING_STANDARD_DMT,
+ TIMING_STANDARD_GTF,
+ TIMING_STANDARD_CVT,
+ TIMING_STANDARD_CVT_RB,
+ TIMING_STANDARD_CEA770,
+ TIMING_STANDARD_CEA861,
+ TIMING_STANDARD_HDMI,
+ TIMING_STANDARD_TV_NTSC,
+ TIMING_STANDARD_TV_NTSC_J,
+ TIMING_STANDARD_TV_PAL,
+ TIMING_STANDARD_TV_PAL_M,
+ TIMING_STANDARD_TV_PAL_CN,
+ TIMING_STANDARD_TV_SECAM,
+ TIMING_STANDARD_EXPLICIT,
+ /*!< For explicit timings from EDID, VBIOS, etc.*/
+ TIMING_STANDARD_USER_OVERRIDE,
+ /*!< For mode timing override by user*/
+ TIMING_STANDARD_MAX
+};
+
+
+
+enum dc_color_depth {
+ COLOR_DEPTH_UNDEFINED,
+ COLOR_DEPTH_666,
+ COLOR_DEPTH_888,
+ COLOR_DEPTH_101010,
+ COLOR_DEPTH_121212,
+ COLOR_DEPTH_141414,
+ COLOR_DEPTH_161616,
+ COLOR_DEPTH_COUNT
+};
+
+enum dc_pixel_encoding {
+ PIXEL_ENCODING_UNDEFINED,
+ PIXEL_ENCODING_RGB,
+ PIXEL_ENCODING_YCBCR422,
+ PIXEL_ENCODING_YCBCR444,
+ PIXEL_ENCODING_YCBCR420,
+ PIXEL_ENCODING_COUNT
+};
+
+enum dc_aspect_ratio {
+ ASPECT_RATIO_NO_DATA,
+ ASPECT_RATIO_4_3,
+ ASPECT_RATIO_16_9,
+ ASPECT_RATIO_64_27,
+ ASPECT_RATIO_256_135,
+ ASPECT_RATIO_FUTURE
+};
+
+enum scanning_type {
+ SCANNING_TYPE_NODATA = 0,
+ SCANNING_TYPE_OVERSCAN,
+ SCANNING_TYPE_UNDERSCAN,
+ SCANNING_TYPE_FUTURE,
+ SCANNING_TYPE_UNDEFINED
+};
+
+struct dc_crtc_timing_flags {
+ uint32_t INTERLACE :1;
+ uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1, polarity is positive
+ (reversed relative to the DAL1/video BIOS definition) */
+ uint32_t VSYNC_POSITIVE_POLARITY :1; /* when set to 1, polarity is positive
+ (reversed relative to the DAL1/video BIOS definition) */
+
+ uint32_t HORZ_COUNT_BY_TWO:1;
+
+ uint32_t EXCLUSIVE_3D :1; /* if this bit set,
+ timing can be driven in 3D format only
+ and there is no corresponding 2D timing*/
+ uint32_t RIGHT_EYE_3D_POLARITY :1; /* 1 - means right eye polarity
+ (right eye = '1', left eye = '0') */
+ uint32_t SUB_SAMPLE_3D :1; /* 1 - means left/right images subsampled
+ when mixed into 3D image. 0 - means summation (3D timing is doubled)*/
+ uint32_t USE_IN_3D_VIEW_ONLY :1; /* Do not use this timing in 2D View,
+ because corresponding 2D timing also present in the list*/
+ uint32_t STEREO_3D_PREFERENCE :1; /* Means this is 2D timing
+ and we want to match priority of corresponding 3D timing*/
+ uint32_t Y_ONLY :1;
+
+ uint32_t YCBCR420 :1; /* TODO: shouldn't need this flag, should be a separate pixel format */
+ uint32_t DTD_COUNTER :5; /* values 1 to 16 */
+
+ uint32_t FORCE_HDR :1;
+
+ /* HDMI 2.0 - Support scrambling for TMDS character
+ * rates less than or equal to 340Mcsc */
+ uint32_t LTE_340MCSC_SCRAMBLE:1;
+
+};
+
+enum dc_timing_3d_format {
+ TIMING_3D_FORMAT_NONE,
+ TIMING_3D_FORMAT_FRAME_ALTERNATE, /* No stereosync at all*/
+ TIMING_3D_FORMAT_INBAND_FA, /* Inband Frame Alternate (DVI/DP)*/
+ TIMING_3D_FORMAT_DP_HDMI_INBAND_FA, /* Inband FA to HDMI Frame Pack*/
+ /* for active DP-HDMI dongle*/
+ TIMING_3D_FORMAT_SIDEBAND_FA, /* Sideband Frame Alternate (eDP)*/
+ TIMING_3D_FORMAT_HW_FRAME_PACKING,
+ TIMING_3D_FORMAT_SW_FRAME_PACKING,
+ TIMING_3D_FORMAT_ROW_INTERLEAVE,
+ TIMING_3D_FORMAT_COLUMN_INTERLEAVE,
+ TIMING_3D_FORMAT_PIXEL_INTERLEAVE,
+ TIMING_3D_FORMAT_SIDE_BY_SIDE,
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM,
+ TIMING_3D_FORMAT_SBS_SW_PACKED,
+ /* Side-by-side, packed by application/driver into 2D frame*/
+ TIMING_3D_FORMAT_TB_SW_PACKED,
+ /* Top-and-bottom, packed by application/driver into 2D frame*/
+
+ TIMING_3D_FORMAT_MAX,
+};
+
+
+struct dc_crtc_timing {
+
+ uint32_t h_total;
+ uint32_t h_border_left;
+ uint32_t h_addressable;
+ uint32_t h_border_right;
+ uint32_t h_front_porch;
+ uint32_t h_sync_width;
+
+ uint32_t v_total;
+ uint32_t v_border_top;
+ uint32_t v_addressable;
+ uint32_t v_border_bottom;
+ uint32_t v_front_porch;
+ uint32_t v_sync_width;
+
+ uint32_t pix_clk_khz;
+
+ uint32_t vic;
+ uint32_t hdmi_vic;
+ enum dc_timing_3d_format timing_3d_format;
+ enum dc_color_depth display_color_depth;
+ enum dc_pixel_encoding pixel_encoding;
+ enum dc_aspect_ratio aspect_ratio;
+ enum scanning_type scan_type;
+
+ struct dc_crtc_timing_flags flags;
+};
+
+#define MAX_TG_COLOR_VALUE 0x3FF
+struct tg_color {
+ /* Maximum 10-bit color value */
+ uint16_t color_r_cr;
+ uint16_t color_g_y;
+ uint16_t color_b_cb;
+};
+
+#endif /* DC_HW_TYPES_H */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
new file mode 100644
index 000000000000..a8698e399111
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -0,0 +1,652 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DC_TYPES_H_
+#define DC_TYPES_H_
+
+#include "fixed32_32.h"
+#include "fixed31_32.h"
+#include "irq_types.h"
+#include "dc_dp_types.h"
+#include "dc_hw_types.h"
+#include "dal_types.h"
+#include "grph_object_defs.h"
+
+/* forward declarations */
+struct dc_plane_state;
+struct dc_stream_state;
+struct dc_link;
+struct dc_sink;
+struct dal;
+
+/********************************
+ * Environment definitions
+ ********************************/
+enum dce_environment {
+ DCE_ENV_PRODUCTION_DRV = 0,
+ /* Emulation on FPGA, in "Maximus" System.
+ * This environment enforces that *only* DC registers are accessed.
+ * (access to non-DC registers will hang FPGA) */
+ DCE_ENV_FPGA_MAXIMUS,
+ /* Emulation on real HW or on FPGA. Used by Diagnostics, enforces
+ * requirements of Diagnostics team. */
+ DCE_ENV_DIAG
+};
+
+/* Note: use these macro definitions instead of direct comparison! */
+#define IS_FPGA_MAXIMUS_DC(dce_environment) \
+ (dce_environment == DCE_ENV_FPGA_MAXIMUS)
+
+#define IS_DIAG_DC(dce_environment) \
+ (IS_FPGA_MAXIMUS_DC(dce_environment) || (dce_environment == DCE_ENV_DIAG))
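For illustration (not part of the patch), a minimal sketch of how the helper macros above are meant to be used instead of comparing the enum directly; dce_env is a hypothetical local holding a dce_environment value.

    /* Sketch: branch on environment via the helper macros. */
    enum dce_environment dce_env = DCE_ENV_PRODUCTION_DRV;

    if (IS_FPGA_MAXIMUS_DC(dce_env)) {
            /* FPGA emulation: only DC registers may be touched */
    } else if (IS_DIAG_DC(dce_env)) {
            /* Diagnostics environment (this macro also covers the FPGA case) */
    } else {
            /* production driver path */
    }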
+
+struct hw_asic_id {
+ uint32_t chip_id;
+ uint32_t chip_family;
+ uint32_t pci_revision_id;
+ uint32_t hw_internal_rev;
+ uint32_t vram_type;
+ uint32_t vram_width;
+ uint32_t feature_flags;
+ uint32_t fake_paths_num;
+ void *atombios_base_address;
+};
+
+struct dc_context {
+ struct dc *dc;
+
+ void *driver_context; /* e.g. amdgpu_device */
+
+ struct dal_logger *logger;
+ void *cgs_device;
+
+ enum dce_environment dce_environment;
+ struct hw_asic_id asic_id;
+
+ /* todo: below should probably move to dc. to facilitate removal
+ * of AS we will store these here
+ */
+ enum dce_version dce_version;
+ struct dc_bios *dc_bios;
+ bool created_bios;
+ struct gpio_service *gpio_service;
+ struct i2caux *i2caux;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ uint64_t fbc_gpu_addr;
+#endif
+};
+
+
+#define MAX_EDID_BUFFER_SIZE 512
+#define EDID_BLOCK_SIZE 128
+#define MAX_SURFACE_NUM 4
+#define NUM_PIXEL_FORMATS 10
+
+#include "dc_ddc_types.h"
+
+enum tiling_mode {
+ TILING_MODE_INVALID,
+ TILING_MODE_LINEAR,
+ TILING_MODE_TILED,
+ TILING_MODE_COUNT
+};
+
+enum view_3d_format {
+ VIEW_3D_FORMAT_NONE = 0,
+ VIEW_3D_FORMAT_FRAME_SEQUENTIAL,
+ VIEW_3D_FORMAT_SIDE_BY_SIDE,
+ VIEW_3D_FORMAT_TOP_AND_BOTTOM,
+ VIEW_3D_FORMAT_COUNT,
+ VIEW_3D_FORMAT_FIRST = VIEW_3D_FORMAT_FRAME_SEQUENTIAL
+};
+
+enum plane_stereo_format {
+ PLANE_STEREO_FORMAT_NONE = 0,
+ PLANE_STEREO_FORMAT_SIDE_BY_SIDE = 1,
+ PLANE_STEREO_FORMAT_TOP_AND_BOTTOM = 2,
+ PLANE_STEREO_FORMAT_FRAME_ALTERNATE = 3,
+ PLANE_STEREO_FORMAT_ROW_INTERLEAVED = 5,
+ PLANE_STEREO_FORMAT_COLUMN_INTERLEAVED = 6,
+ PLANE_STEREO_FORMAT_CHECKER_BOARD = 7
+};
+
+/* TODO: find a way to calculate the number of bits automatically.
+ * Please increase the count if the pixel_format enum grows beyond its
+ * current range, PIXEL_FORMAT_INDEX8 to PIXEL_FORMAT_444BPP32
+ */
+
+enum dc_edid_connector_type {
+ EDID_CONNECTOR_UNKNOWN = 0,
+ EDID_CONNECTOR_ANALOG = 1,
+ EDID_CONNECTOR_DIGITAL = 10,
+ EDID_CONNECTOR_DVI = 11,
+ EDID_CONNECTOR_HDMIA = 12,
+ EDID_CONNECTOR_MDDI = 14,
+ EDID_CONNECTOR_DISPLAYPORT = 15
+};
+
+enum dc_edid_status {
+ EDID_OK,
+ EDID_BAD_INPUT,
+ EDID_NO_RESPONSE,
+ EDID_BAD_CHECKSUM,
+ EDID_THE_SAME,
+};
+
+/* audio capability from EDID*/
+struct dc_cea_audio_mode {
+ uint8_t format_code; /* ucData[0] [6:3]*/
+ uint8_t channel_count; /* ucData[0] [2:0]*/
+ uint8_t sample_rate; /* ucData[1]*/
+ union {
+ uint8_t sample_size; /* for LPCM*/
+ /* for Audio Formats 2-8 (Max bit rate divided by 8 kHz)*/
+ uint8_t max_bit_rate;
+ uint8_t audio_codec_vendor_specific; /* for Audio Formats 9-15*/
+ };
+};
+
+struct dc_edid {
+ uint32_t length;
+ uint8_t raw_edid[MAX_EDID_BUFFER_SIZE];
+};
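For illustration (not part of the patch): a raw EDID is EDID_BLOCK_SIZE bytes per block (base block plus extensions) and must fit in raw_edid[], so a simple sanity check might look like the hypothetical helper below.

    /* Sketch: hypothetical length check against the buffer limits above. */
    static bool dc_edid_length_is_valid(const struct dc_edid *edid)
    {
            if (edid->length < EDID_BLOCK_SIZE ||
                edid->length > MAX_EDID_BUFFER_SIZE)
                    return false;

            /* length must be a whole number of 128-byte blocks */
            return (edid->length % EDID_BLOCK_SIZE) == 0;
    }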
+
+/* When speaker location data block is not available, DEFAULT_SPEAKER_LOCATION
+ * is used. In this case we assume the speaker locations are: front left, front
+ * right and front center. */
+#define DEFAULT_SPEAKER_LOCATION 5
+
+#define DC_MAX_AUDIO_DESC_COUNT 16
+
+#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
+
+union display_content_support {
+ unsigned int raw;
+ struct {
+ unsigned int valid_content_type :1;
+ unsigned int game_content :1;
+ unsigned int cinema_content :1;
+ unsigned int photo_content :1;
+ unsigned int graphics_content :1;
+ unsigned int reserved :27;
+ } bits;
+};
+
+struct dc_edid_caps {
+ /* sink identification */
+ uint16_t manufacturer_id;
+ uint16_t product_id;
+ uint32_t serial_number;
+ uint8_t manufacture_week;
+ uint8_t manufacture_year;
+ uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+
+ /* audio caps */
+ uint8_t speaker_flags;
+ uint32_t audio_mode_count;
+ struct dc_cea_audio_mode audio_modes[DC_MAX_AUDIO_DESC_COUNT];
+ uint32_t audio_latency;
+ uint32_t video_latency;
+
+ union display_content_support content_support;
+
+ uint8_t qs_bit;
+ uint8_t qy_bit;
+
+ /*HDMI 2.0 caps*/
+ bool lte_340mcsc_scramble;
+
+ bool edid_hdmi;
+};
+
+struct view {
+ uint32_t width;
+ uint32_t height;
+};
+
+struct dc_mode_flags {
+ /* note: part of refresh rate flag*/
+ uint32_t INTERLACE :1;
+ /* native display timing*/
+ uint32_t NATIVE :1;
+ /* preferred is the recommended mode, one per display */
+ uint32_t PREFERRED :1;
+ /* true if this mode should use reduced blanking timings;
+ * _not_ related to the Reduced Blanking adjustment */
+ uint32_t REDUCED_BLANKING :1;
+ /* note: part of refresh rate flag */
+ uint32_t VIDEO_OPTIMIZED_RATE :1;
+ /* should be reported to upper layers as mode_flags*/
+ uint32_t PACKED_PIXEL_FORMAT :1;
+ /* preferred view */
+ uint32_t PREFERRED_VIEW :1;
+ /* this timing should be used only in tiled mode*/
+ uint32_t TILED_MODE :1;
+ uint32_t DSE_MODE :1;
+ /* Refresh rate divider when a Miracast sink is using a
+ * different rate than the output display device.
+ * Must be zero for wired displays and non-zero for
+ * Miracast displays. */
+ uint32_t MIRACAST_REFRESH_DIVIDER;
+};
+
+
+enum dc_timing_source {
+ TIMING_SOURCE_UNDEFINED,
+
+ /* explicitly specified by user, most important */
+ TIMING_SOURCE_USER_FORCED,
+ TIMING_SOURCE_USER_OVERRIDE,
+ TIMING_SOURCE_CUSTOM,
+ TIMING_SOURCE_EXPLICIT,
+
+ /* explicitly specified by the display device, more important*/
+ TIMING_SOURCE_EDID_CEA_SVD_3D,
+ TIMING_SOURCE_EDID_CEA_SVD_PREFERRED,
+ TIMING_SOURCE_EDID_CEA_SVD_420,
+ TIMING_SOURCE_EDID_DETAILED,
+ TIMING_SOURCE_EDID_ESTABLISHED,
+ TIMING_SOURCE_EDID_STANDARD,
+ TIMING_SOURCE_EDID_CEA_SVD,
+ TIMING_SOURCE_EDID_CVT_3BYTE,
+ TIMING_SOURCE_EDID_4BYTE,
+ TIMING_SOURCE_VBIOS,
+ TIMING_SOURCE_CV,
+ TIMING_SOURCE_TV,
+ TIMING_SOURCE_HDMI_VIC,
+
+ /* implicitly specified by display device, still safe but less important*/
+ TIMING_SOURCE_DEFAULT,
+
+ /* only used for custom base modes */
+ TIMING_SOURCE_CUSTOM_BASE,
+
+ /* these timings might not work, least important */
+ TIMING_SOURCE_RANGELIMIT,
+ TIMING_SOURCE_OS_FORCED,
+ TIMING_SOURCE_IMPLICIT,
+
+ /* only used by default mode list*/
+ TIMING_SOURCE_BASICMODE,
+
+ TIMING_SOURCE_COUNT
+};
+
+
+struct stereo_3d_features {
+ bool supported;
+ bool allTimings;
+ bool cloneMode;
+ bool scaling;
+ bool singleFrameSWPacked;
+};
+
+enum dc_timing_support_method {
+ TIMING_SUPPORT_METHOD_UNDEFINED,
+ TIMING_SUPPORT_METHOD_EXPLICIT,
+ TIMING_SUPPORT_METHOD_IMPLICIT,
+ TIMING_SUPPORT_METHOD_NATIVE
+};
+
+struct dc_mode_info {
+ uint32_t pixel_width;
+ uint32_t pixel_height;
+ uint32_t field_rate;
+ /* Vertical refresh rate for progressive modes.
+ * Field rate for interlaced modes.*/
+
+ enum dc_timing_standard timing_standard;
+ enum dc_timing_source timing_source;
+ struct dc_mode_flags flags;
+};
+
+enum dc_power_state {
+ DC_POWER_STATE_ON = 1,
+ DC_POWER_STATE_STANDBY,
+ DC_POWER_STATE_SUSPEND,
+ DC_POWER_STATE_OFF
+};
+
+/* DC PowerStates */
+enum dc_video_power_state {
+ DC_VIDEO_POWER_UNSPECIFIED = 0,
+ DC_VIDEO_POWER_ON = 1,
+ DC_VIDEO_POWER_STANDBY,
+ DC_VIDEO_POWER_SUSPEND,
+ DC_VIDEO_POWER_OFF,
+ DC_VIDEO_POWER_HIBERNATE,
+ DC_VIDEO_POWER_SHUTDOWN,
+ DC_VIDEO_POWER_ULPS, /* BACO or Ultra-Light-Power-State */
+ DC_VIDEO_POWER_AFTER_RESET,
+ DC_VIDEO_POWER_MAXIMUM
+};
+
+enum dc_acpi_cm_power_state {
+ DC_ACPI_CM_POWER_STATE_D0 = 1,
+ DC_ACPI_CM_POWER_STATE_D1 = 2,
+ DC_ACPI_CM_POWER_STATE_D2 = 4,
+ DC_ACPI_CM_POWER_STATE_D3 = 8
+};
+
+enum dc_connection_type {
+ dc_connection_none,
+ dc_connection_single,
+ dc_connection_mst_branch,
+ dc_connection_active_dongle
+};
+
+struct dc_csc_adjustments {
+ struct fixed31_32 contrast;
+ struct fixed31_32 saturation;
+ struct fixed31_32 brightness;
+ struct fixed31_32 hue;
+};
+
+enum {
+ MAX_LANES = 2,
+ MAX_COFUNC_PATH = 6,
+ LAYER_INDEX_PRIMARY = -1,
+};
+
+enum dpcd_downstream_port_max_bpc {
+ DOWN_STREAM_MAX_8BPC = 0,
+ DOWN_STREAM_MAX_10BPC,
+ DOWN_STREAM_MAX_12BPC,
+ DOWN_STREAM_MAX_16BPC
+};
+struct dc_dongle_caps {
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ bool extendedCapValid;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ * indicates 'Frame Sequential-to-Frame Pack' conversion capability. */
+ bool is_dp_hdmi_s3d_converter;
+ bool is_dp_hdmi_ycbcr422_pass_through;
+ bool is_dp_hdmi_ycbcr420_pass_through;
+ bool is_dp_hdmi_ycbcr422_converter;
+ bool is_dp_hdmi_ycbcr420_converter;
+ uint32_t dp_hdmi_max_bpc;
+ uint32_t dp_hdmi_max_pixel_clk;
+};
+/* Scaling format */
+enum scaling_transformation {
+ SCALING_TRANSFORMATION_UNINITIALIZED,
+ SCALING_TRANSFORMATION_IDENTITY = 0x0001,
+ SCALING_TRANSFORMATION_CENTER_TIMING = 0x0002,
+ SCALING_TRANSFORMATION_FULL_SCREEN_SCALE = 0x0004,
+ SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE = 0x0008,
+ SCALING_TRANSFORMATION_DAL_DECIDE = 0x0010,
+ SCALING_TRANSFORMATION_INVALID = 0x80000000,
+
+ /* Flag the first and last */
+ SCALING_TRANSFORMATION_BEGING = SCALING_TRANSFORMATION_IDENTITY,
+ SCALING_TRANSFORMATION_END =
+ SCALING_TRANSFORMATION_PRESERVE_ASPECT_RATIO_SCALE
+};
+
+enum display_content_type {
+ DISPLAY_CONTENT_TYPE_NO_DATA = 0,
+ DISPLAY_CONTENT_TYPE_GRAPHICS = 1,
+ DISPLAY_CONTENT_TYPE_PHOTO = 2,
+ DISPLAY_CONTENT_TYPE_CINEMA = 4,
+ DISPLAY_CONTENT_TYPE_GAME = 8
+};
+
+/* audio*/
+
+union audio_sample_rates {
+ struct sample_rates {
+ uint8_t RATE_32:1;
+ uint8_t RATE_44_1:1;
+ uint8_t RATE_48:1;
+ uint8_t RATE_88_2:1;
+ uint8_t RATE_96:1;
+ uint8_t RATE_176_4:1;
+ uint8_t RATE_192:1;
+ } rate;
+
+ uint8_t all;
+};
+
+struct audio_speaker_flags {
+ uint32_t FL_FR:1;
+ uint32_t LFE:1;
+ uint32_t FC:1;
+ uint32_t RL_RR:1;
+ uint32_t RC:1;
+ uint32_t FLC_FRC:1;
+ uint32_t RLC_RRC:1;
+ uint32_t SUPPORT_AI:1;
+};
+
+struct audio_speaker_info {
+ uint32_t ALLSPEAKERS:7;
+ uint32_t SUPPORT_AI:1;
+};
+
+
+struct audio_info_flags {
+
+ union {
+
+ struct audio_speaker_flags speaker_flags;
+ struct audio_speaker_info info;
+
+ uint8_t all;
+ };
+};
+
+enum audio_format_code {
+ AUDIO_FORMAT_CODE_FIRST = 1,
+ AUDIO_FORMAT_CODE_LINEARPCM = AUDIO_FORMAT_CODE_FIRST,
+
+ AUDIO_FORMAT_CODE_AC3,
+ /*Layers 1 & 2 */
+ AUDIO_FORMAT_CODE_MPEG1,
+ /*MPEG1 Layer 3 */
+ AUDIO_FORMAT_CODE_MP3,
+ /*multichannel */
+ AUDIO_FORMAT_CODE_MPEG2,
+ AUDIO_FORMAT_CODE_AAC,
+ AUDIO_FORMAT_CODE_DTS,
+ AUDIO_FORMAT_CODE_ATRAC,
+ AUDIO_FORMAT_CODE_1BITAUDIO,
+ AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS,
+ AUDIO_FORMAT_CODE_DTS_HD,
+ AUDIO_FORMAT_CODE_MAT_MLP,
+ AUDIO_FORMAT_CODE_DST,
+ AUDIO_FORMAT_CODE_WMAPRO,
+ AUDIO_FORMAT_CODE_LAST,
+ AUDIO_FORMAT_CODE_COUNT =
+ AUDIO_FORMAT_CODE_LAST - AUDIO_FORMAT_CODE_FIRST
+};
+
+struct audio_mode {
+ /* ucData[0] [6:3] */
+ enum audio_format_code format_code;
+ /* ucData[0] [2:0] */
+ uint8_t channel_count;
+ /* ucData[1] */
+ union audio_sample_rates sample_rates;
+ union {
+ /* for LPCM */
+ uint8_t sample_size;
+ /* for Audio Formats 2-8 (Max bit rate divided by 8 kHz) */
+ uint8_t max_bit_rate;
+ /* for Audio Formats 9-15 */
+ uint8_t vendor_specific;
+ };
+};
+
+struct audio_info {
+ struct audio_info_flags flags;
+ uint32_t video_latency;
+ uint32_t audio_latency;
+ uint32_t display_index;
+ uint8_t display_name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
+ uint32_t manufacture_id;
+ uint32_t product_id;
+ /* PortID used for ContainerID when defined */
+ uint32_t port_id[2];
+ uint32_t mode_count;
+ /* this field must be last in this struct */
+ struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT];
+};
+
+struct freesync_context {
+ bool supported;
+ bool enabled;
+ bool active;
+
+ unsigned int min_refresh_in_micro_hz;
+ unsigned int nominal_refresh_in_micro_hz;
+};
+
+struct psr_config {
+ unsigned char psr_version;
+ unsigned int psr_rfb_setup_time;
+ bool psr_exit_link_training_required;
+
+ bool psr_frame_capture_indication_req;
+ unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
+union dmcu_psr_level {
+ struct {
+ unsigned int SKIP_CRC:1;
+ unsigned int SKIP_DP_VID_STREAM_DISABLE:1;
+ unsigned int SKIP_PHY_POWER_DOWN:1;
+ unsigned int SKIP_AUX_ACK_CHECK:1;
+ unsigned int SKIP_CRTC_DISABLE:1;
+ unsigned int SKIP_AUX_RFB_CAPTURE_CHECK:1;
+ unsigned int SKIP_SMU_NOTIFICATION:1;
+ unsigned int SKIP_AUTO_STATE_ADVANCE:1;
+ unsigned int DISABLE_PSR_ENTRY_ABORT:1;
+ unsigned int SKIP_SINGLE_OTG_DISABLE:1;
+ unsigned int RESERVED:22;
+ } bits;
+ unsigned int u32all;
+};
+
+enum physical_phy_id {
+ PHYLD_0,
+ PHYLD_1,
+ PHYLD_2,
+ PHYLD_3,
+ PHYLD_4,
+ PHYLD_5,
+ PHYLD_6,
+ PHYLD_7,
+ PHYLD_8,
+ PHYLD_9,
+ PHYLD_COUNT,
+ PHYLD_UNKNOWN = (-1L)
+};
+
+enum phy_type {
+ PHY_TYPE_UNKNOWN = 1,
+ PHY_TYPE_PCIE_PHY = 2,
+ PHY_TYPE_UNIPHY = 3,
+};
+
+struct psr_context {
+ /* ddc line */
+ enum channel_id channel;
+ /* Transmitter id */
+ enum transmitter transmitterId;
+ /* Engine Id is used for Dig Be source select */
+ enum engine_id engineId;
+ /* Controller Id used for Dig Fe source select */
+ enum controller_id controllerId;
+ /* Pcie or Uniphy */
+ enum phy_type phyType;
+ /* Physical PHY Id used by SMU interpretation */
+ enum physical_phy_id smuPhyId;
+ /* Vertical total pixels from crtc timing.
+ * This is used for static screen detection.
+ * i.e. if we want to detect half a frame,
+ * we use this to determine the hyst lines.
+ */
+ unsigned int crtcTimingVerticalTotal;
+ /* PSR supported from panel capabilities and
+ * current display configuration
+ */
+ bool psrSupportedDisplayConfig;
+ /* Whether fast link training is supported by the panel */
+ bool psrExitLinkTrainingRequired;
+ /* If RFB setup time is greater than the total VBLANK time,
+ * it is not possible for the sink to capture the video frame
+ * in the same frame the SDP is sent. In this case,
+ * the frame capture indication bit should be set and an extra
+ * static frame should be transmitted to the sink.
+ */
+ bool psrFrameCaptureIndicationReq;
+ /* Set the last possible line SDP may be transmitted without violating
+ * the RFB setup time or entering the active video frame.
+ */
+ unsigned int sdpTransmitLineNumDeadline;
+ /* The VSync rate in Hz used to calculate the
+ * step size for smooth brightness feature
+ */
+ unsigned int vsyncRateHz;
+ unsigned int skipPsrWaitForPllLock;
+ unsigned int numberOfControllers;
+ /* Unused, for future use. To indicate that first changed frame from
+ * state3 shouldn't result in psr_inactive, but rather to perform
+ * an automatic single frame rfb_update.
+ */
+ bool rfb_update_auto_en;
+ /* Number of frames before entering static screen */
+ unsigned int timehyst_frames;
+ /* Partial frames before entering static screen */
+ unsigned int hyst_lines;
+ /* # of repeated AUX transaction attempts to make before
+ * indicating failure to the driver
+ */
+ unsigned int aux_repeats;
+ /* Controls hw blocks to power down during PSR active state */
+ union dmcu_psr_level psr_level;
+ /* Controls additional delay after remote frame capture before
+ * continuing power down
+ */
+ unsigned int frame_delay;
+};
+
+struct colorspace_transform {
+ struct fixed31_32 matrix[12];
+ bool enable_remap;
+};
+
+struct csc_transform {
+ uint16_t matrix[12];
+ bool enable_adjustment;
+};
+
+enum i2c_mot_mode {
+ I2C_MOT_UNDEF,
+ I2C_MOT_TRUE,
+ I2C_MOT_FALSE
+};
+
+#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
new file mode 100644
index 000000000000..8abec0bed379
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for common 'dce' logic
+# HW object files under this folder follow a similar pattern for HW programming:
+# - register offsets and/or shift + mask values stored in the dce_hw struct
+# - register programming through common macros that look up the register
+#   offset/shift/mask stored in the dce_hw struct
+
+DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
+dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+
+
+AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE)
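For illustration (not part of the patch), a sketch of the offset/shift/mask pattern the Makefile comment above describes, using made-up names (my_registers, my_update_field) and assuming dm_read_reg()/dm_write_reg() style accessors: each HW block stores register offsets once, and per-field shift/mask structs let a common helper mask and shift a field without hard-coding registers.

    /* Sketch with hypothetical names; not the actual dce_hw definitions. */
    struct my_registers { uint32_t SOME_CNTL; };
    struct my_shift     { uint8_t  SOME_FIELD; };
    struct my_mask      { uint32_t SOME_FIELD; };

    static void my_update_field(struct dc_context *ctx,
                                const struct my_registers *regs,
                                const struct my_shift *shift,
                                const struct my_mask *mask,
                                uint32_t field_value)
    {
            uint32_t reg = dm_read_reg(ctx, regs->SOME_CNTL); /* assumed accessor */

            reg &= ~mask->SOME_FIELD;
            reg |= (field_value << shift->SOME_FIELD) & mask->SOME_FIELD;
            dm_write_reg(ctx, regs->SOME_CNTL, reg);          /* assumed accessor */
    }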
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
new file mode 100644
index 000000000000..0e0336c5af4e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_abm.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "dc.h"
+
+#include "atom.h"
+
+
+#define TO_DCE_ABM(abm)\
+ container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+ (abm_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ abm_dce->abm_shift->field_name, abm_dce->abm_mask->field_name
+
+#define CTX \
+ abm_dce->base.ctx
+
+#define MCP_ABM_LEVEL_SET 0x65
+#define MCP_ABM_PIPE_SET 0x66
+#define MCP_BL_SET 0x67
+
+#define MCP_DISABLE_ABM_IMMEDIATELY 255
+
+struct abm_backlight_registers {
+ unsigned int BL_PWM_CNTL;
+ unsigned int BL_PWM_CNTL2;
+ unsigned int BL_PWM_PERIOD_CNTL;
+ unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+/* register settings need to be saved and restored; used at InitBacklight */
+static struct abm_backlight_registers stored_backlight_registers = {0};
+
+
+static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
+{
+ uint64_t current_backlight;
+ uint32_t round_result;
+ uint32_t pwm_period_cntl, bl_period, bl_int_count;
+ uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+ uint32_t bl_period_mask, bl_pwm_mask;
+
+ pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
+
+ bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
+ REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
+
+ if (bl_int_count == 0)
+ bl_int_count = 16;
+
+ bl_period_mask = (1 << bl_int_count) - 1;
+ bl_period &= bl_period_mask;
+
+ bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
+
+ if (fractional_duty_cycle_en == 0)
+ bl_pwm &= bl_pwm_mask;
+ else
+ bl_pwm &= 0xFFFF;
+
+ current_backlight = bl_pwm << (1 + bl_int_count);
+
+ if (bl_period == 0)
+ bl_period = 0xFFFF;
+
+ current_backlight = div_u64(current_backlight, bl_period);
+ current_backlight = (current_backlight + 1) >> 1;
+
+ current_backlight = (uint64_t)(current_backlight) * bl_period;
+
+ round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+
+ round_result = (round_result >> (bl_int_count-1)) & 1;
+
+ current_backlight >>= bl_int_count;
+ current_backlight += round_result;
+
+ return (uint32_t)(current_backlight);
+}
+
+static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
+{
+ uint32_t backlight_24bit;
+ uint32_t backlight_17bit;
+ uint32_t backlight_16bit;
+ uint32_t masked_pwm_period;
+ uint8_t rounding_bit;
+ uint8_t bit_count;
+ uint64_t active_duty_cycle;
+ uint32_t pwm_period_bitcnt;
+
+ /*
+ * 1. Convert 8-bit value to 17 bit U1.16 format
+ * (1 integer, 16 fractional bits)
+ */
+
+ /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
+ * effectively multiplying value by 256/255
+ * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
+ */
+ backlight_24bit = level * 0x10101;
+
+ /* 1.2 The upper 16 bits of the 24 bit value are the fraction; the lower 8
+ * bits are used for rounding. Take the most significant of those lower 8
+ * bits as the rounding bit, e.g. for 0xEFEFEF the rounding bit is 1
+ */
+ rounding_bit = (backlight_24bit >> 7) & 1;
+
+ /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
+ * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
+ */
+ backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
+
+ /*
+ * 2. Find 16 bit backlight active duty cycle, where 0 <= backlight
+ * active duty cycle <= backlight period
+ */
+
+ /* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+ */
+ REG_GET_2(BL_PWM_PERIOD_CNTL,
+ BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
+ BL_PWM_PERIOD, &masked_pwm_period);
+
+ if (pwm_period_bitcnt == 0)
+ bit_count = 16;
+ else
+ bit_count = pwm_period_bitcnt;
+
+ /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+ masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
+
+ /* 2.2 Calculate the active duty cycle: the upper 16 bits contain the
+ * integer component, the lower 16 bits the fractional component,
+ * e.g. 0x21BDC0 = 0xEFF0 * 0x24
+ */
+ active_duty_cycle = backlight_17bit * masked_pwm_period;
+
+ /* 2.3 Calculate 16 bit active duty cycle from integer and fractional
+ * components: shift by bitCount, then mask 16 bits and add the rounding bit
+ * from the MSB of the fraction, e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFFF) + 0
+ */
+ backlight_16bit = active_duty_cycle >> bit_count;
+ backlight_16bit &= 0xFFFF;
+ backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+ /*
+ * 3. Program register with updated value
+ */
+
+ /* 3.1 Lock group 2 backlight registers */
+
+ REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
+ BL_PWM_GRP1_REG_LOCK, 1);
+
+ /* 3.2 Write new active duty cycle */
+ REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+ /* 3.3 Unlock group 2 backlight registers */
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ /* 3.4 Wait for pending bit to be cleared */
+ REG_WAIT(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
+ 1, 10000);
+}
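For illustration (not part of the patch), a standalone sketch of the 8-bit to 16-bit duty-cycle conversion the comments above walk through, reproducing the worked values level = 0xEF, period = 0x24, bit count = 6.

    /* Sketch: 0xEF * 0x10101 = 0xEFEFEF; (0xEFEFEF >> 8) + 1 = 0xEFF0;
     * 0xEFF0 * 0x24 = 0x21BDC0; ((0x21BDC0 >> 6) & 0xFFFF) + 0 = 0x86F7. */
    static uint32_t backlight_duty_cycle(uint32_t level, uint32_t period,
                                         uint32_t bit_count)
    {
            uint32_t bl24 = level * 0x10101;                 /* 8 -> 24 bits */
            uint32_t bl17 = (bl24 >> 8) + ((bl24 >> 7) & 1); /* round to U1.16 */
            uint64_t duty = (uint64_t)bl17 * (period & ((1 << bit_count) - 1));
            uint32_t bl16 = (duty >> bit_count) & 0xFFFF;

            return bl16 + ((duty >> (bit_count - 1)) & 1);   /* add rounding bit */
    }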
+
+static void dmcu_set_backlight_level(
+ struct dce_abm *abm_dce,
+ uint32_t level,
+ uint32_t frame_ramp,
+ uint32_t controller_id)
+{
+ unsigned int backlight_16_bit = (level * 0x10101) >> 8;
+ unsigned int backlight_17_bit = backlight_16_bit +
+ (((backlight_16_bit & 0x80) >> 7) & 1);
+ uint32_t rampingBoundary = 0xFFFF;
+ uint32_t s2;
+
+ /* set ramping boundary */
+ REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
+
+ /* setDMCUParam_Pipe */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_PIPE_SET,
+ MASTER_COMM_CMD_REG_BYTE1, controller_id);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+ 0, 1, 80000);
+
+ /* setDMCUParam_BL */
+ REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit);
+
+ /* write ramp */
+ if (controller_id == 0)
+ frame_ramp = 0;
+ REG_WRITE(MASTER_COMM_DATA_REG1, frame_ramp);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, MCP_BL_SET);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* UpdateRequestedBacklightLevel */
+ s2 = REG_READ(BIOS_SCRATCH_2);
+
+ s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+ ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+ s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ REG_WRITE(BIOS_SCRATCH_2, s2);
+}
+
+static void dce_abm_init(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int backlight = get_current_backlight_16_bit(abm_dce);
+
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101);
+ REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101);
+
+ REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+ ABM1_HG_NUM_OF_BINS_SEL, 0,
+ ABM1_HG_VMAX_SEL, 1,
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+ REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+ ABM1_IPCSC_COEFF_SEL_R, 2,
+ ABM1_IPCSC_COEFF_SEL_G, 4,
+ ABM1_IPCSC_COEFF_SEL_B, 2);
+
+ REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+ BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+ BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL,
+ BL1_PWM_USER_LEVEL, backlight);
+
+ REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+ REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+}
+
+static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+ return (backlight >> 8);
+}
+
+static bool dce_abm_set_level(struct abm *abm, uint32_t level)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
+ /* setDMCUParam_ABMLevel */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
+ MASTER_COMM_CMD_REG_BYTE2, level);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
+}
+
+static bool dce_abm_immediate_disable(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ 1, 80000);
+
+ /* setDMCUParam_ABMLevel */
+ REG_UPDATE_2(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, MCP_ABM_LEVEL_SET,
+ MASTER_COMM_CMD_REG_BYTE2, MCP_DISABLE_ABM_IMMEDIATELY);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
+}
+
+static bool dce_abm_init_backlight(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ uint32_t value;
+
+ /* The value must not be 0 or 1, so we have to restore the registers.
+ * BIOS bug workaround: the period resets to zero, so restore from the
+ * cached values, which are always correct.
+ */
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
+ if (value == 0 || value == 1) {
+ if (stored_backlight_registers.BL_PWM_CNTL != 0) {
+ REG_WRITE(BL_PWM_CNTL,
+ stored_backlight_registers.BL_PWM_CNTL);
+ REG_WRITE(BL_PWM_CNTL2,
+ stored_backlight_registers.BL_PWM_CNTL2);
+ REG_WRITE(BL_PWM_PERIOD_CNTL,
+ stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+ REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
+ BL_PWM_REF_DIV,
+ stored_backlight_registers.
+ LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ } else {
+ /* TODO: Note: This should not really happen since VBIOS
+ * should have initialized PWM registers on boot.
+ */
+ REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
+ REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
+ }
+ } else {
+ stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &stored_backlight_registers.
+ LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ }
+
+ /* Have driver take backlight control
+ * TakeBacklightControl(true)
+ */
+ value = REG_READ(BIOS_SCRATCH_2);
+ value |= ATOM_S2_VRI_BRIGHT_ENABLE;
+ REG_WRITE(BIOS_SCRATCH_2, value);
+
+ /* Enable the backlight output */
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+ /* Unlock group 2 backlight registers */
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ return true;
+}
+
+static bool is_dmcu_initialized(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int dmcu_uc_reset;
+
+ REG_GET(DMCU_STATUS, UC_IN_RESET, &dmcu_uc_reset);
+
+ return !dmcu_uc_reset;
+}
+
+static bool dce_abm_set_backlight_level(
+ struct abm *abm,
+ unsigned int backlight_level,
+ unsigned int frame_ramp,
+ unsigned int controller_id)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+
+ dm_logger_write(abm->ctx->logger, LOG_BACKLIGHT,
+ "New Backlight level: %d (0x%X)\n",
+ backlight_level, backlight_level);
+
+ /* If DMCU is in reset state, DMCU is uninitialized */
+ if (is_dmcu_initialized(abm))
+ dmcu_set_backlight_level(abm_dce,
+ backlight_level,
+ frame_ramp,
+ controller_id);
+ else
+ driver_set_backlight_level(abm_dce, backlight_level);
+
+ return true;
+}
+
+static const struct abm_funcs dce_funcs = {
+ .abm_init = dce_abm_init,
+ .set_abm_level = dce_abm_set_level,
+ .init_backlight = dce_abm_init_backlight,
+ .set_backlight_level = dce_abm_set_backlight_level,
+ .get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
+ .set_abm_immediate_disable = dce_abm_immediate_disable,
+ .is_dmcu_initialized = is_dmcu_initialized
+};
+
+static void dce_abm_construct(
+ struct dce_abm *abm_dce,
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct abm *base = &abm_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ abm_dce->regs = regs;
+ abm_dce->abm_shift = abm_shift;
+ abm_dce->abm_mask = abm_mask;
+}
+
+struct abm *dce_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+
+ if (abm_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+
+ abm_dce->base.funcs = &dce_funcs;
+
+ return &abm_dce->base;
+}
+
+void dce_abm_destroy(struct abm **abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
+
+ kfree(abm_dce);
+ *abm = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
new file mode 100644
index 000000000000..59e909ec88f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_ABM_H_
+#define _DCE_ABM_H_
+
+#include "abm.h"
+
+#define ABM_COMMON_REG_LIST_DCE_BASE() \
+ SR(BL_PWM_PERIOD_CNTL), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_GRP1_REG_LOCK), \
+ SR(LVTMA_PWRSEQ_REF_DIV), \
+ SR(MASTER_COMM_CNTL_REG), \
+ SR(MASTER_COMM_CMD_REG), \
+ SR(MASTER_COMM_DATA_REG1), \
+ SR(DMCU_STATUS)
+
+#define ABM_DCE110_COMMON_REG_LIST() \
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SR(DC_ABM1_HG_SAMPLE_RATE), \
+ SR(DC_ABM1_LS_SAMPLE_RATE), \
+ SR(BL1_PWM_BL_UPDATE_SAMPLE_RATE), \
+ SR(DC_ABM1_HG_MISC_CTRL), \
+ SR(DC_ABM1_IPCSC_COEFF_SEL), \
+ SR(BL1_PWM_CURRENT_ABM_LEVEL), \
+ SR(BL1_PWM_TARGET_ABM_LEVEL), \
+ SR(BL1_PWM_USER_LEVEL), \
+ SR(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES), \
+ SR(DC_ABM1_HGLS_REG_READ_PROGRESS), \
+ SR(BIOS_SCRATCH_2)
+
+#define ABM_DCN10_REG_LIST(id)\
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ NBIO_SR(BIOS_SCRATCH_2)
+
+#define ABM_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
+ ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
+ ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
+ ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
+ ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
+ ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
+ ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
+ ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
+ ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
+ ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
+ ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE2, mask_sh), \
+ ABM_SF(DMCU_STATUS, UC_IN_RESET, mask_sh)
+
+#define ABM_MASK_SH_LIST_DCE110(mask_sh) \
+ ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_NUM_OF_BINS_SEL, mask_sh), \
+ ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_VMAX_SEL, mask_sh), \
+ ABM_SF(DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, mask_sh), \
+ ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_R, mask_sh), \
+ ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_G, mask_sh), \
+ ABM_SF(DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_B, mask_sh), \
+ ABM_SF(BL1_PWM_CURRENT_ABM_LEVEL, \
+ BL1_PWM_CURRENT_ABM_LEVEL, mask_sh), \
+ ABM_SF(BL1_PWM_TARGET_ABM_LEVEL, \
+ BL1_PWM_TARGET_ABM_LEVEL, mask_sh), \
+ ABM_SF(BL1_PWM_USER_LEVEL, \
+ BL1_PWM_USER_LEVEL, mask_sh), \
+ ABM_SF(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
+
+#define ABM_MASK_SH_LIST_DCN10(mask_sh) \
+ ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_NUM_OF_BINS_SEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_VMAX_SEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HG_MISC_CTRL, \
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_R, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_G, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_IPCSC_COEFF_SEL, \
+ ABM1_IPCSC_COEFF_SEL_B, mask_sh), \
+ ABM_SF(ABM0_BL1_PWM_CURRENT_ABM_LEVEL, \
+ BL1_PWM_CURRENT_ABM_LEVEL, mask_sh), \
+ ABM_SF(ABM0_BL1_PWM_TARGET_ABM_LEVEL, \
+ BL1_PWM_TARGET_ABM_LEVEL, mask_sh), \
+ ABM_SF(ABM0_BL1_PWM_USER_LEVEL, \
+ BL1_PWM_USER_LEVEL, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, \
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, mask_sh), \
+ ABM_SF(ABM0_DC_ABM1_HGLS_REG_READ_PROGRESS, \
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, mask_sh)
+
+#define ABM_REG_FIELD_LIST(type) \
+ type ABM1_HG_NUM_OF_BINS_SEL; \
+ type ABM1_HG_VMAX_SEL; \
+ type ABM1_HG_BIN_BITWIDTH_SIZE_SEL; \
+ type ABM1_IPCSC_COEFF_SEL_R; \
+ type ABM1_IPCSC_COEFF_SEL_G; \
+ type ABM1_IPCSC_COEFF_SEL_B; \
+ type BL1_PWM_CURRENT_ABM_LEVEL; \
+ type BL1_PWM_TARGET_ABM_LEVEL; \
+ type BL1_PWM_USER_LEVEL; \
+ type ABM1_LS_MIN_PIXEL_VALUE_THRES; \
+ type ABM1_LS_MAX_PIXEL_VALUE_THRES; \
+ type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
+ type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
+ type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
+ type BL_PWM_PERIOD; \
+ type BL_PWM_PERIOD_BITCNT; \
+ type BL_ACTIVE_INT_FRAC_CNT; \
+ type BL_PWM_FRACTIONAL_EN; \
+ type MASTER_COMM_INTERRUPT; \
+ type MASTER_COMM_CMD_REG_BYTE0; \
+ type MASTER_COMM_CMD_REG_BYTE1; \
+ type MASTER_COMM_CMD_REG_BYTE2; \
+ type BL_PWM_REF_DIV; \
+ type BL_PWM_EN; \
+ type UC_IN_RESET; \
+ type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
+ type BL_PWM_GRP1_REG_LOCK; \
+ type BL_PWM_GRP1_REG_UPDATE_PENDING
+
+struct dce_abm_shift {
+ ABM_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_abm_mask {
+ ABM_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_abm_registers {
+ uint32_t BL_PWM_PERIOD_CNTL;
+ uint32_t BL_PWM_CNTL;
+ uint32_t BL_PWM_CNTL2;
+ uint32_t LVTMA_PWRSEQ_REF_DIV;
+ uint32_t DC_ABM1_HG_SAMPLE_RATE;
+ uint32_t DC_ABM1_LS_SAMPLE_RATE;
+ uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
+ uint32_t DC_ABM1_HG_MISC_CTRL;
+ uint32_t DC_ABM1_IPCSC_COEFF_SEL;
+ uint32_t BL1_PWM_CURRENT_ABM_LEVEL;
+ uint32_t BL1_PWM_TARGET_ABM_LEVEL;
+ uint32_t BL1_PWM_USER_LEVEL;
+ uint32_t DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES;
+ uint32_t DC_ABM1_HGLS_REG_READ_PROGRESS;
+ uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t MASTER_COMM_CMD_REG;
+ uint32_t MASTER_COMM_DATA_REG1;
+ uint32_t BIOS_SCRATCH_2;
+ uint32_t DMCU_STATUS;
+ uint32_t BL_PWM_GRP1_REG_LOCK;
+};
+
+struct dce_abm {
+ struct abm base;
+ const struct dce_abm_registers *regs;
+ const struct dce_abm_shift *abm_shift;
+ const struct dce_abm_mask *abm_mask;
+};
+
+struct abm *dce_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask);
+
+void dce_abm_destroy(struct abm **abm);
+
+#endif /* _DCE_ABM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
new file mode 100644
index 000000000000..0df9ecb2710c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -0,0 +1,945 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dce_audio.h"
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define DCE_AUD(audio)\
+ container_of(audio, struct dce_audio, base)
+
+#define CTX \
+ aud->base.ctx
+#define REG(reg)\
+ (aud->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ aud->shifts->field_name, aud->masks->field_name
+
+#define IX_REG(reg)\
+ ix ## reg
+
+#define AZ_REG_READ(reg_name) \
+ read_indirect_azalia_reg(audio, IX_REG(reg_name))
+
+#define AZ_REG_WRITE(reg_name, value) \
+ write_indirect_azalia_reg(audio, IX_REG(reg_name), value)
+
+static void write_indirect_azalia_reg(struct audio *audio,
+ uint32_t reg_index,
+ uint32_t reg_data)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */
+ REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+ AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+ /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
+ REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,
+ AZALIA_ENDPOINT_REG_DATA, reg_data);
+
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "AUDIO:write_indirect_azalia_reg: index: %u data: %u\n",
+ reg_index, reg_data);
+}
+
+static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ uint32_t value = 0;
+
+ /* AZALIA_F0_CODEC_ENDPOINT_INDEX endpoint index */
+ REG_SET(AZALIA_F0_CODEC_ENDPOINT_INDEX, 0,
+ AZALIA_ENDPOINT_REG_INDEX, reg_index);
+
+ /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */
+ value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA);
+
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "AUDIO:read_indirect_azalia_reg: index: %u data: %u\n",
+ reg_index, value);
+
+ return value;
+}
+
+static bool is_audio_format_supported(
+ const struct audio_info *audio_info,
+ enum audio_format_code audio_format_code,
+ uint32_t *format_index)
+{
+ uint32_t index;
+ uint32_t max_channe_index = 0;
+ bool found = false;
+
+ if (audio_info == NULL)
+ return found;
+
+ /* pass through whole array */
+ for (index = 0; index < audio_info->mode_count; index++) {
+ if (audio_info->modes[index].format_code == audio_format_code) {
+ if (found) {
+ /* format has multiple entries, choose the one with
+ * the highest number of channels */
+ if (audio_info->modes[index].channel_count >
+ audio_info->modes[max_channe_index].channel_count) {
+ max_channe_index = index;
+ }
+ } else {
+ /* format found, save its index */
+ found = true;
+ max_channe_index = index;
+ }
+ }
+ }
+
+ /* return index */
+ if (found && format_index != NULL)
+ *format_index = max_channe_index;
+
+ return found;
+}
+
+/*For HDMI, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_hdmi(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ union audio_sample_rates *sample_rates)
+{
+ uint32_t samples;
+ uint32_t h_blank;
+ bool limit_freq_to_48_khz = false;
+ bool limit_freq_to_88_2_khz = false;
+ bool limit_freq_to_96_khz = false;
+ bool limit_freq_to_174_4_khz = false;
+
+ /* For two channels or fewer, return whatever the sink supports, unmodified */
+ if (channel_count > 2) {
+
+ /* Based on HDMI spec 1.3 Table 7.5 */
+ if ((crtc_info->requested_pixel_clock <= 27000) &&
+ (crtc_info->v_active <= 576) &&
+ !(crtc_info->interlaced) &&
+ !(crtc_info->pixel_repetition == 2 ||
+ crtc_info->pixel_repetition == 4)) {
+ limit_freq_to_48_khz = true;
+
+ } else if ((crtc_info->requested_pixel_clock <= 27000) &&
+ (crtc_info->v_active <= 576) &&
+ (crtc_info->interlaced) &&
+ (crtc_info->pixel_repetition == 2)) {
+ limit_freq_to_88_2_khz = true;
+
+ } else if ((crtc_info->requested_pixel_clock <= 54000) &&
+ (crtc_info->v_active <= 576) &&
+ !(crtc_info->interlaced)) {
+ limit_freq_to_174_4_khz = true;
+ }
+ }
+
+ /* Also do some calculation for the available Audio Bandwidth for the
+ * 8 ch (i.e. for the Layout 1 => ch > 2)
+ */
+ h_blank = crtc_info->h_total - crtc_info->h_active;
+
+ if (crtc_info->pixel_repetition)
+ h_blank *= crtc_info->pixel_repetition;
+
+ /*based on HDMI spec 1.3 Table 7.5 */
+ h_blank -= 58;
+ /*for Control Period */
+ h_blank -= 16;
+
+ samples = h_blank * 10;
+ /* Number of Audio Packets (multiplied by 10) per Line (for 8 ch number
+ * of Audio samples per line multiplied by 10 - Layout 1)
+ */
+ samples /= 32;
+ samples *= crtc_info->v_active;
+ /*Number of samples multiplied by 10, per second */
+ samples *= crtc_info->refresh_rate;
+ /*Number of Audio samples per second */
+ samples /= 10;
+
+ /* @todo do it after deep color is implemented
+ * 8xx - deep color bandwidth scaling
+ * Extra bandwidth is available in deep color b/c link runs faster than
+ * pixel rate. This has the effect of allowing more tmds characters to
+ * be transmitted during blank
+ */
+
+ switch (crtc_info->color_depth) {
+ case COLOR_DEPTH_888:
+ samples *= 4;
+ break;
+ case COLOR_DEPTH_101010:
+ samples *= 5;
+ break;
+ case COLOR_DEPTH_121212:
+ samples *= 6;
+ break;
+ default:
+ samples *= 4;
+ break;
+ }
+
+ samples /= 4;
+
+ /*check limitation*/
+ if (samples < 88200)
+ limit_freq_to_48_khz = true;
+ else if (samples < 96000)
+ limit_freq_to_88_2_khz = true;
+ else if (samples < 176400)
+ limit_freq_to_96_khz = true;
+ else if (samples < 192000)
+ limit_freq_to_174_4_khz = true;
+
+ if (sample_rates != NULL) {
+ /* limit frequencies */
+ if (limit_freq_to_174_4_khz)
+ sample_rates->rate.RATE_192 = 0;
+
+ if (limit_freq_to_96_khz) {
+ sample_rates->rate.RATE_192 = 0;
+ sample_rates->rate.RATE_176_4 = 0;
+ }
+ if (limit_freq_to_88_2_khz) {
+ sample_rates->rate.RATE_192 = 0;
+ sample_rates->rate.RATE_176_4 = 0;
+ sample_rates->rate.RATE_96 = 0;
+ }
+ if (limit_freq_to_48_khz) {
+ sample_rates->rate.RATE_192 = 0;
+ sample_rates->rate.RATE_176_4 = 0;
+ sample_rates->rate.RATE_96 = 0;
+ sample_rates->rate.RATE_88_2 = 0;
+ }
+ }
+}
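For illustration (not part of the patch), plugging a common 1080p60 timing into the calculation above; the numbers assume h_total 2200, h_active 1920, v_active 1080, 60 Hz, 8 bpc and no pixel repetition, and the helper name is hypothetical.

    /* Sketch: samples-per-second estimate mirroring the integer math above. */
    static uint32_t hdmi_audio_samples_per_sec_8bpc(uint32_t h_total,
                                                    uint32_t h_active,
                                                    uint32_t v_active,
                                                    uint32_t refresh_rate)
    {
            uint32_t h_blank = h_total - h_active - 58 - 16;
            uint32_t samples = h_blank * 10 / 32;  /* audio samples per line, x10 */

            samples *= v_active;
            samples *= refresh_rate;
            samples /= 10;
            return samples * 4 / 4;                /* COLOR_DEPTH_888 scaling */
    }

For 2200/1920/1080/60 this gives 414720 samples per second, which clears the 192000 threshold, so none of the sample-rate limits would be applied for that timing.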
+
+/*For DP SST, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_dpsst(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ union audio_sample_rates *sample_rates)
+{
+ /* do nothing */
+}
+
+/*For DP MST, calculate if specified sample rates can fit into a given timing */
+static void check_audio_bandwidth_dpmst(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ union audio_sample_rates *sample_rates)
+{
+ /* do nothing */
+}
+
+static void check_audio_bandwidth(
+ const struct audio_crtc_info *crtc_info,
+ uint32_t channel_count,
+ enum signal_type signal,
+ union audio_sample_rates *sample_rates)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ check_audio_bandwidth_hdmi(
+ crtc_info, channel_count, sample_rates);
+ break;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ check_audio_bandwidth_dpsst(
+ crtc_info, channel_count, sample_rates);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ check_audio_bandwidth_dpmst(
+ crtc_info, channel_count, sample_rates);
+ break;
+ default:
+ break;
+ }
+}
+
+/* expose/not expose HBR capability to Audio driver */
+static void set_high_bit_rate_capable(
+ struct audio *audio,
+ bool capable)
+{
+ uint32_t value = 0;
+
+ /* set high bit rate audio capable*/
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR);
+
+ set_reg_field_value(value, capable,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR,
+ HBR_CAPABLE);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR, value);
+}
+
+/* set video latency in ms/2+1 */
+static void set_video_latency(
+ struct audio *audio,
+ int latency_in_ms)
+{
+ uint32_t value = 0;
+
+ if ((latency_in_ms < 0) || (latency_in_ms > 255))
+ return;
+
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+ set_reg_field_value(value, latency_in_ms,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ VIDEO_LIPSYNC);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ value);
+}
+
+/* set audio latency in ms/2+1 */
+static void set_audio_latency(
+ struct audio *audio,
+ int latency_in_ms)
+{
+ uint32_t value = 0;
+
+ if (latency_in_ms < 0)
+ latency_in_ms = 0;
+
+ if (latency_in_ms > 255)
+ latency_in_ms = 255;
+
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC);
+
+ set_reg_field_value(value, latency_in_ms,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ AUDIO_LIPSYNC);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ value);
+}
+
+void dce_aud_az_enable(struct audio *audio)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+ uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ AUDIO_ENABLED);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "\n\t========= AUDIO:dce_aud_az_enable: index: %u data: 0x%x\n",
+ audio->inst, value);
+}
+
+void dce_aud_az_disable(struct audio *audio)
+{
+ uint32_t value;
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+
+ set_reg_field_value(value, 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ AUDIO_ENABLED);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+
+ set_reg_field_value(value, 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ dm_logger_write(CTX->logger, LOG_HW_AUDIO,
+ "\n\t========= AUDIO:dce_aud_az_disable: index: %u data: 0x%x\n",
+ audio->inst, value);
+}
+
+void dce_aud_az_configure(
+ struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_info *audio_info)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ uint32_t speakers = audio_info->flags.info.ALLSPEAKERS;
+ uint32_t value;
+ uint32_t field = 0;
+ enum audio_format_code audio_format_code;
+ uint32_t format_index;
+ uint32_t index;
+ bool is_ac3_supported = false;
+ union audio_sample_rates sample_rate;
+ uint32_t strlen = 0;
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+
+ /* Speaker Allocation */
+ /*
+ uint32_t value;
+ uint32_t field = 0;*/
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+
+ set_reg_field_value(value,
+ speakers,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ SPEAKER_ALLOCATION);
+
+ /* LFE_PLAYBACK_LEVEL = LFEPBL
+ * LFEPBL = 0 : Unknown or refer to other information
+ * LFEPBL = 1 : 0dB playback
+ * LFEPBL = 2 : +10dB playback
+ * LFEPBL = 3 : Reserved
+ */
+ set_reg_field_value(value,
+ 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ LFE_PLAYBACK_LEVEL);
+ /* TODO: according to the register spec LFE_PLAYBACK_LEVEL is read-only,
+ * so why are we writing to it? DCE8 does not write this field. */
+
+
+ set_reg_field_value(value,
+ 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ HDMI_CONNECTION);
+
+ set_reg_field_value(value,
+ 0,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ DP_CONNECTION);
+
+ field = get_reg_field_value(value,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ EXTRA_CONNECTION_INFO);
+
+ field &= ~0x1;
+
+ set_reg_field_value(value,
+ field,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ EXTRA_CONNECTION_INFO);
+
+ /* set audio for output signal */
+ switch (signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ set_reg_field_value(value,
+ 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ HDMI_CONNECTION);
+
+ break;
+
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ set_reg_field_value(value,
+ 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ DP_CONNECTION);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, value);
+
+ /* Audio Descriptors */
+ /* pass through all formats */
+ for (format_index = 0; format_index < AUDIO_FORMAT_CODE_COUNT;
+ format_index++) {
+ audio_format_code =
+ (AUDIO_FORMAT_CODE_FIRST + format_index);
+
+ /* those are unsupported, skip programming */
+ if (audio_format_code == AUDIO_FORMAT_CODE_1BITAUDIO ||
+ audio_format_code == AUDIO_FORMAT_CODE_DST)
+ continue;
+
+ value = 0;
+
+ /* check if supported */
+ if (is_audio_format_supported(
+ audio_info, audio_format_code, &index)) {
+ const struct audio_mode *audio_mode =
+ &audio_info->modes[index];
+ union audio_sample_rates sample_rates =
+ audio_mode->sample_rates;
+ uint8_t byte2 = audio_mode->max_bit_rate;
+
+ /* adjust specific properties */
+ switch (audio_format_code) {
+ case AUDIO_FORMAT_CODE_LINEARPCM: {
+ check_audio_bandwidth(
+ crtc_info,
+ audio_mode->channel_count,
+ signal,
+ &sample_rates);
+
+ byte2 = audio_mode->sample_size;
+
+ set_reg_field_value(value,
+ sample_rates.all,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ SUPPORTED_FREQUENCIES_STEREO);
+ }
+ break;
+ case AUDIO_FORMAT_CODE_AC3:
+ is_ac3_supported = true;
+ break;
+ case AUDIO_FORMAT_CODE_DOLBYDIGITALPLUS:
+ case AUDIO_FORMAT_CODE_DTS_HD:
+ case AUDIO_FORMAT_CODE_MAT_MLP:
+ case AUDIO_FORMAT_CODE_DST:
+ case AUDIO_FORMAT_CODE_WMAPRO:
+ byte2 = audio_mode->vendor_specific;
+ break;
+ default:
+ break;
+ }
+
+ /* fill audio format data */
+ set_reg_field_value(value,
+ audio_mode->channel_count - 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ MAX_CHANNELS);
+
+ set_reg_field_value(value,
+ sample_rates.all,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ SUPPORTED_FREQUENCIES);
+
+ set_reg_field_value(value,
+ byte2,
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
+ DESCRIPTOR_BYTE_2);
+ } /* if */
+
+ AZ_REG_WRITE(
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 + format_index,
+ value);
+ } /* for */
+
+ if (is_ac3_supported)
+ /* TODO: this register is global; why is it programmed here, per endpoint? */
+ REG_WRITE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS,
+ 0x05);
+
+ /* check for 192 kHz / 8-channel support for HBR requirements */
+ sample_rate.all = 0;
+ sample_rate.rate.RATE_192 = 1;
+
+ check_audio_bandwidth(
+ crtc_info,
+ 8,
+ signal,
+ &sample_rate);
+
+ set_high_bit_rate_capable(audio, sample_rate.rate.RATE_192);
+
+ /* Audio and Video Lipsync */
+ set_video_latency(audio, audio_info->video_latency);
+ set_audio_latency(audio, audio_info->audio_latency);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->manufacture_id,
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+ MANUFACTURER_ID);
+
+ set_reg_field_value(value, audio_info->product_id,
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+ PRODUCT_ID);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0,
+ value);
+
+ value = 0;
+
+ /* get display name string length */
+ while (audio_info->display_name[strlen++] != '\0') {
+ if (strlen >=
+ MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS)
+ break;
+ }
+ set_reg_field_value(value, strlen,
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+ SINK_DESCRIPTION_LEN);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1,
+ value);
+
+ /*
+ * write the port ID:
+ * PORT_ID0 = display index
+ * PORT_ID1 = 16-bit BDF
+ * (format MSB->LSB: 8-bit Bus, 5-bit Device, 3-bit Function)
+ */
+
+ value = 0;
+
+ set_reg_field_value(value, audio_info->port_id[0],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2,
+ PORT_ID0);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->port_id[1],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3,
+ PORT_ID1);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3, value);
+
+ /* write the 18-character monitor string */
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[0],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION0);
+
+ set_reg_field_value(value, audio_info->display_name[1],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION1);
+
+ set_reg_field_value(value, audio_info->display_name[2],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION2);
+
+ set_reg_field_value(value, audio_info->display_name[3],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4,
+ DESCRIPTION3);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[4],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION4);
+
+ set_reg_field_value(value, audio_info->display_name[5],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION5);
+
+ set_reg_field_value(value, audio_info->display_name[6],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION6);
+
+ set_reg_field_value(value, audio_info->display_name[7],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5,
+ DESCRIPTION7);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[8],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION8);
+
+ set_reg_field_value(value, audio_info->display_name[9],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION9);
+
+ set_reg_field_value(value, audio_info->display_name[10],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION10);
+
+ set_reg_field_value(value, audio_info->display_name[11],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6,
+ DESCRIPTION11);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[12],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION12);
+
+ set_reg_field_value(value, audio_info->display_name[13],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION13);
+
+ set_reg_field_value(value, audio_info->display_name[14],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION14);
+
+ set_reg_field_value(value, audio_info->display_name[15],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7,
+ DESCRIPTION15);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7, value);
+
+ value = 0;
+ set_reg_field_value(value, audio_info->display_name[16],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+ DESCRIPTION16);
+
+ set_reg_field_value(value, audio_info->display_name[17],
+ AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8,
+ DESCRIPTION17);
+
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8, value);
+}
+
+/*
+ * TODO: wall clock related functionality probably belongs in clock_src.
+ */
+
+/* compute the Azalia HDMI audio DTO settings from the pixel clock */
+static void get_azalia_clock_info_hdmi(
+ uint32_t crtc_pixel_clock_in_khz,
+ uint32_t actual_pixel_clock_in_khz,
+ struct azalia_clock_info *azalia_clock_info)
+{
+ /* audio_dto_phase= 24 * 10,000;
+ * 24MHz in [100Hz] units */
+ azalia_clock_info->audio_dto_phase =
+ 24 * 10000;
+
+ /* audio_dto_module = PCLKFrequency * 10,000;
+ * [khz] -> [100Hz] */
+ azalia_clock_info->audio_dto_module =
+ actual_pixel_clock_in_khz * 10;
+}
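+
+/* Illustrative arithmetic for get_azalia_clock_info_hdmi(), assuming the DCCG
+ * audio DTO outputs source_clock * phase / module: for a 148500 kHz (1080p60)
+ * actual pixel clock, audio_dto_module = 1485000 and audio_dto_phase = 240000
+ * (both in 100 Hz units), giving 148500 * 240000 / 1485000 = 24000 kHz,
+ * i.e. the 24 MHz audio reference clock.
+ */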
+
+static void get_azalia_clock_info_dp(
+ uint32_t requested_pixel_clock_in_khz,
+ const struct audio_pll_info *pll_info,
+ struct azalia_clock_info *azalia_clock_info)
+{
+ /* Reported dpDtoSourceClockInkhz value for
+ * DCE8 already adjusted for SS, do not need any
+ * adjustment here anymore
+ */
+
+ /*audio_dto_phase = 24 * 10,000;
+ * 24MHz in [100Hz] units */
+ azalia_clock_info->audio_dto_phase = 24 * 10000;
+
+ /*audio_dto_module = dpDtoSourceClockInkhz * 10,000;
+ * [khz] ->[100Hz] */
+ azalia_clock_info->audio_dto_module =
+ pll_info->dp_dto_source_clock_in_khz * 10;
+}
+
+void dce_aud_wall_dto_setup(
+ struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info)
+{
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ struct azalia_clock_info clock_info = { 0 };
+
+ if (dc_is_hdmi_signal(signal)) {
+ uint32_t src_sel;
+
+ /*DTO0 Programming goal:
+ -generate 24MHz, 128*Fs from 24MHz
+ -use DTO0 when an active HDMI port is connected
+ (optionally a DP is connected) */
+
+ /* calculate DTO settings */
+ get_azalia_clock_info_hdmi(
+ crtc_info->requested_pixel_clock,
+ crtc_info->calculated_pixel_clock,
+ &clock_info);
+
+ dm_logger_write(audio->ctx->logger, LOG_HW_AUDIO,
+ "\n%s:Input::requested_pixel_clock = %d"
+ " calculated_pixel_clock = %d\n"
+ "audio_dto_module = %d audio_dto_phase = %d\n\n", __func__,
+ crtc_info->requested_pixel_clock,
+ crtc_info->calculated_pixel_clock,
+ clock_info.audio_dto_module,
+ clock_info.audio_dto_phase);
+
+ /* On TN/SI, Program DTO source select and DTO select before
+ programming DTO modulo and DTO phase. These bits must be
+ programmed first, otherwise there will be no HDMI audio at boot
+ up. This is a HW sequence change (different from old ASICs).
+ Caution when changing this programming sequence.
+
+ HDMI enabled, using DTO0
+ program master CRTC for DTO0 */
+ src_sel = pll_info->dto_source - DTO_SOURCE_ID0;
+ REG_UPDATE_2(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO0_SOURCE_SEL, src_sel,
+ DCCG_AUDIO_DTO_SEL, 0);
+
+ /* module */
+ REG_UPDATE(DCCG_AUDIO_DTO0_MODULE,
+ DCCG_AUDIO_DTO0_MODULE, clock_info.audio_dto_module);
+
+ /* phase */
+ REG_UPDATE(DCCG_AUDIO_DTO0_PHASE,
+ DCCG_AUDIO_DTO0_PHASE, clock_info.audio_dto_phase);
+ } else {
+ /*DTO1 Programming goal:
+ -generate 24MHz, 512*Fs, 128*Fs from 24MHz
+ -default is to use DTO1, and switch to DTO0 when an audio
+ master HDMI port is connected
+ -use as default for DP
+
+ calculate DTO settings */
+ get_azalia_clock_info_dp(
+ crtc_info->requested_pixel_clock,
+ pll_info,
+ &clock_info);
+
+ /* Program DTO select before programming DTO modulo and DTO
+ phase. default to use DTO1 */
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 1);
+
+ /* DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1)
+ * Select 512*Fs for DP. TODO: the web register definition
+ * does not match the register header file; in the DCE11 version
+ * this is commented out, while DCE8 sets it to 1.
+ */
+
+ /* module */
+ REG_UPDATE(DCCG_AUDIO_DTO1_MODULE,
+ DCCG_AUDIO_DTO1_MODULE, clock_info.audio_dto_module);
+
+ /* phase */
+ REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
+ DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
+
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1);
+
+ }
+}
+
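+/* PORT_CONNECTIVITY is the connectivity field of the standard Azalia/HD Audio
+ * configuration default; per that encoding a value of 1 means "no physical
+ * connection", so the endpoint is reported valid for any other value. */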
+static bool dce_aud_endpoint_valid(struct audio *audio)
+{
+ uint32_t value;
+ uint32_t port_connectivity;
+
+ value = AZ_REG_READ(
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+
+ port_connectivity = get_reg_field_value(value,
+ AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
+ PORT_CONNECTIVITY);
+
+ return !(port_connectivity == 1);
+}
+
+/* initialize HW state */
+void dce_aud_hw_init(
+ struct audio *audio)
+{
+ uint32_t value;
+ struct dce_audio *aud = DCE_AUD(audio);
+
+ /* we only need to program the following registers once, so we only do
+ * it for instance 0 */
+ if (audio->inst != 0)
+ return;
+
+ /* Support R5 - 32 kHz
+ * Support R6 - 44.1 kHz
+ * Support R7 - 48 kHz
+ */
+ /*disable clock gating before write to endpoint register*/
+ value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
+ REG_UPDATE(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES,
+ AUDIO_RATE_CAPABILITIES, 0x70);
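+ /* 0x70 corresponds to the HD Audio PCM rate-capability bits R5|R6|R7
+ * (0x10 = 32 kHz, 0x20 = 44.1 kHz, 0x40 = 48 kHz), matching the
+ * "Support R5/R6/R7" note above. */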
+
+ /* Keep-alive bit to verify HW block in BU. */
+ REG_UPDATE_2(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES,
+ CLKSTOP, 1,
+ EPSS, 1);
+}
+
+static const struct audio_funcs funcs = {
+ .endpoint_valid = dce_aud_endpoint_valid,
+ .hw_init = dce_aud_hw_init,
+ .wall_dto_setup = dce_aud_wall_dto_setup,
+ .az_enable = dce_aud_az_enable,
+ .az_disable = dce_aud_az_disable,
+ .az_configure = dce_aud_az_configure,
+ .destroy = dce_aud_destroy,
+};
+
+void dce_aud_destroy(struct audio **audio)
+{
+ struct dce_audio *aud = DCE_AUD(*audio);
+
+ kfree(aud);
+ *audio = NULL;
+}
+
+struct audio *dce_audio_create(
+ struct dc_context *ctx,
+ unsigned int inst,
+ const struct dce_audio_registers *reg,
+ const struct dce_audio_shift *shifts,
+ const struct dce_aduio_mask *masks
+ )
+{
+ struct dce_audio *audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+
+ if (audio == NULL) {
+ ASSERT_CRITICAL(audio);
+ return NULL;
+ }
+
+ audio->base.ctx = ctx;
+ audio->base.inst = inst;
+ audio->base.funcs = &funcs;
+
+ audio->regs = reg;
+ audio->shifts = shifts;
+ audio->masks = masks;
+
+ return &audio->base;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
new file mode 100644
index 000000000000..0dc5ff137c7a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_AUDIO_DCE_110_H__
+#define __DAL_AUDIO_DCE_110_H__
+
+#include "audio.h"
+
+#define AUD_COMMON_REG_LIST(id)\
+ SRI(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZF0ENDPOINT, id),\
+ SRI(AZALIA_F0_CODEC_ENDPOINT_DATA, AZF0ENDPOINT, id),\
+ SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS),\
+ SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES),\
+ SR(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES),\
+ SR(DCCG_AUDIO_DTO_SOURCE),\
+ SR(DCCG_AUDIO_DTO0_MODULE),\
+ SR(DCCG_AUDIO_DTO0_PHASE),\
+ SR(DCCG_AUDIO_DTO1_MODULE),\
+ SR(DCCG_AUDIO_DTO1_PHASE)
+
+
+ /* set field name */
+#define SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+
+#define AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+ SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO2_USE_512FBR_DTO, mask_sh),\
+ SF(DCCG_AUDIO_DTO0_MODULE, DCCG_AUDIO_DTO0_MODULE, mask_sh),\
+ SF(DCCG_AUDIO_DTO0_PHASE, DCCG_AUDIO_DTO0_PHASE, mask_sh),\
+ SF(DCCG_AUDIO_DTO1_MODULE, DCCG_AUDIO_DTO1_MODULE, mask_sh),\
+ SF(DCCG_AUDIO_DTO1_PHASE, DCCG_AUDIO_DTO1_PHASE, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES, AUDIO_RATE_CAPABILITIES, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, CLKSTOP, mask_sh),\
+ SF(AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES, EPSS, mask_sh)
+
+#define AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh),\
+ SF(AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh)
+
+
+struct dce_audio_registers {
+ uint32_t AZALIA_F0_CODEC_ENDPOINT_INDEX;
+ uint32_t AZALIA_F0_CODEC_ENDPOINT_DATA;
+
+ uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS;
+ uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES;
+ uint32_t AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES;
+
+ uint32_t DCCG_AUDIO_DTO_SOURCE;
+ uint32_t DCCG_AUDIO_DTO0_MODULE;
+ uint32_t DCCG_AUDIO_DTO0_PHASE;
+ uint32_t DCCG_AUDIO_DTO1_MODULE;
+ uint32_t DCCG_AUDIO_DTO1_PHASE;
+
+ uint32_t AUDIO_RATE_CAPABILITIES;
+};
+
+struct dce_audio_shift {
+ uint8_t AZALIA_ENDPOINT_REG_INDEX;
+ uint8_t AZALIA_ENDPOINT_REG_DATA;
+
+ uint8_t AUDIO_RATE_CAPABILITIES;
+ uint8_t CLKSTOP;
+ uint8_t EPSS;
+
+ uint8_t DCCG_AUDIO_DTO0_SOURCE_SEL;
+ uint8_t DCCG_AUDIO_DTO_SEL;
+ uint8_t DCCG_AUDIO_DTO0_MODULE;
+ uint8_t DCCG_AUDIO_DTO0_PHASE;
+ uint8_t DCCG_AUDIO_DTO1_MODULE;
+ uint8_t DCCG_AUDIO_DTO1_PHASE;
+ uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
+};
+
+struct dce_aduio_mask {
+ uint32_t AZALIA_ENDPOINT_REG_INDEX;
+ uint32_t AZALIA_ENDPOINT_REG_DATA;
+
+ uint32_t AUDIO_RATE_CAPABILITIES;
+ uint32_t CLKSTOP;
+ uint32_t EPSS;
+
+ uint32_t DCCG_AUDIO_DTO0_SOURCE_SEL;
+ uint32_t DCCG_AUDIO_DTO_SEL;
+ uint32_t DCCG_AUDIO_DTO0_MODULE;
+ uint32_t DCCG_AUDIO_DTO0_PHASE;
+ uint32_t DCCG_AUDIO_DTO1_MODULE;
+ uint32_t DCCG_AUDIO_DTO1_PHASE;
+ uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;
+};
+
+struct dce_audio {
+ struct audio base;
+ const struct dce_audio_registers *regs;
+ const struct dce_audio_shift *shifts;
+ const struct dce_aduio_mask *masks;
+};
+
+struct audio *dce_audio_create(
+ struct dc_context *ctx,
+ unsigned int inst,
+ const struct dce_audio_registers *reg,
+ const struct dce_audio_shift *shifts,
+ const struct dce_aduio_mask *masks);
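+
+/* Illustrative usage sketch (audio_regs/audio_shift/audio_mask are
+ * hypothetical per-ASIC tables that a resource file would build with the
+ * AUD_COMMON_* macros above):
+ *
+ * struct audio *aud = dce_audio_create(ctx, 0,
+ * &audio_regs[0], &audio_shift, &audio_mask);
+ */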
+
+void dce_aud_destroy(struct audio **audio);
+
+void dce_aud_hw_init(struct audio *audio);
+
+void dce_aud_az_enable(struct audio *audio);
+void dce_aud_az_disable(struct audio *audio);
+
+void dce_aud_az_configure(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_info *audio_info);
+
+void dce_aud_wall_dto_setup(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info);
+
+#endif /*__DAL_AUDIO_DCE_110_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
new file mode 100644
index 000000000000..31280d252753
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -0,0 +1,1383 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+
+#include "dc_types.h"
+#include "core_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+
+#include "dce_clock_source.h"
+
+#include "reg_helper.h"
+
+#define REG(reg)\
+ (clk_src->regs->reg)
+
+#define CTX \
+ clk_src->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_src->cs_shift->field_name, clk_src->cs_mask->field_name
+
+#define FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM 6
+#define CALC_PLL_CLK_SRC_ERR_TOLERANCE 1
+#define MAX_PLL_CALC_ERROR 0xFFFFFFFF
+
+static const struct spread_spectrum_data *get_ss_data_entry(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal,
+ uint32_t pix_clk_khz)
+{
+
+ uint32_t entrys_num;
+ uint32_t i;
+ struct spread_spectrum_data *ss_parm = NULL;
+ struct spread_spectrum_data *ret = NULL;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ ss_parm = clk_src->dvi_ss_params;
+ entrys_num = clk_src->dvi_ss_params_cnt;
+ break;
+
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ ss_parm = clk_src->hdmi_ss_params;
+ entrys_num = clk_src->hdmi_ss_params_cnt;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_VIRTUAL:
+ ss_parm = clk_src->dp_ss_params;
+ entrys_num = clk_src->dp_ss_params_cnt;
+ break;
+
+ default:
+ ss_parm = NULL;
+ entrys_num = 0;
+ break;
+ }
+
+ if (ss_parm == NULL)
+ return ret;
+
+ for (i = 0; i < entrys_num; ++i, ++ss_parm) {
+ if (ss_parm->freq_range_khz >= pix_clk_khz) {
+ ret = ss_parm;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * Function: calculate_fb_and_fractional_fb_divider
+ *
+ * DESCRIPTION: Calculates the feedback and fractional feedback divider values.
+ *
+ * PARAMETERS:
+ * target_pix_clk_khz Desired frequency in kHz
+ * ref_divider Reference divider (already known)
+ * post_divider Post divider (already known)
+ * feedback_divider_param Pointer where to store the
+ * calculated feedback divider value
+ * fract_feedback_divider_param Pointer where to store the
+ * calculated fractional feedback divider value
+ *
+ * RETURNS:
+ * It fills the locations pointed to by feedback_divider_param
+ * and fract_feedback_divider_param.
+ * It returns - true if the feedback divider is not 0
+ * - false otherwise (should never happen)
+ */
+static bool calculate_fb_and_fractional_fb_divider(
+ struct calc_pll_clock_source *calc_pll_cs,
+ uint32_t target_pix_clk_khz,
+ uint32_t ref_divider,
+ uint32_t post_divider,
+ uint32_t *feedback_divider_param,
+ uint32_t *fract_feedback_divider_param)
+{
+ uint64_t feedback_divider;
+
+ feedback_divider =
+ (uint64_t)(target_pix_clk_khz * ref_divider * post_divider);
+ feedback_divider *= 10;
+ /* additional factor, since we divide by 10 afterwards */
+ feedback_divider *= (uint64_t)(calc_pll_cs->fract_fb_divider_factor);
+ feedback_divider = div_u64(feedback_divider, calc_pll_cs->ref_freq_khz);
+
+/*Round to the number of precision
+ * The following code replace the old code (ullfeedbackDivider + 5)/10
+ * for example if the difference between the number
+ * of fractional feedback decimal point and the fractional FB Divider precision
+ * is 2 then the equation becomes (ullfeedbackDivider + 5*100) / (10*100))*/
+
+ feedback_divider += (uint64_t)
+ (5 * calc_pll_cs->fract_fb_divider_precision_factor);
+ feedback_divider =
+ div_u64(feedback_divider,
+ calc_pll_cs->fract_fb_divider_precision_factor * 10);
+ feedback_divider *= (uint64_t)
+ (calc_pll_cs->fract_fb_divider_precision_factor);
+
+ *feedback_divider_param =
+ div_u64_rem(
+ feedback_divider,
+ calc_pll_cs->fract_fb_divider_factor,
+ fract_feedback_divider_param);
+
+ if (*feedback_divider_param != 0)
+ return true;
+ return false;
+}
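+
+/* Illustrative arithmetic for calculate_fb_and_fractional_fb_divider()
+ * (numbers chosen only to exercise the formula, assuming a 27000 kHz
+ * reference and fract_fb_divider_factor = 1000000): a 148500 kHz target with
+ * ref_divider = 2 and post_divider = 2 gives
+ * 148500 * 2 * 2 / 27000 = 22.000000, i.e. feedback_divider = 22 and
+ * fract_feedback_divider = 0.
+ */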
+
+/**
+ * calc_fb_divider_checking_tolerance
+ *
+ * DESCRIPTION: Calculates the feedback and fractional feedback divider values
+ * for the passed reference and post divider, checking for tolerance.
+ * PARAMETERS:
+ * pll_settings Pointer to the PLL settings structure
+ * ref_divider Reference divider (already known)
+ * post_divider Post divider (already known)
+ * tolerance Tolerance for the calculated pixel clock to be within
+ *
+ * RETURNS:
+ * It fills the pll_settings structure with the PLL divider values
+ * if the calculated values are within the required tolerance.
+ * It returns - true if the error is within tolerance
+ * - false if the error is not within tolerance
+ */
+static bool calc_fb_divider_checking_tolerance(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct pll_settings *pll_settings,
+ uint32_t ref_divider,
+ uint32_t post_divider,
+ uint32_t tolerance)
+{
+ uint32_t feedback_divider;
+ uint32_t fract_feedback_divider;
+ uint32_t actual_calculated_clock_khz;
+ uint32_t abs_err;
+ uint64_t actual_calc_clk_khz;
+
+ calculate_fb_and_fractional_fb_divider(
+ calc_pll_cs,
+ pll_settings->adjusted_pix_clk,
+ ref_divider,
+ post_divider,
+ &feedback_divider,
+ &fract_feedback_divider);
+
+ /*Actual calculated value*/
+ actual_calc_clk_khz = (uint64_t)(feedback_divider *
+ calc_pll_cs->fract_fb_divider_factor) +
+ fract_feedback_divider;
+ actual_calc_clk_khz *= calc_pll_cs->ref_freq_khz;
+ actual_calc_clk_khz =
+ div_u64(actual_calc_clk_khz,
+ ref_divider * post_divider *
+ calc_pll_cs->fract_fb_divider_factor);
+
+ actual_calculated_clock_khz = (uint32_t)(actual_calc_clk_khz);
+
+ abs_err = (actual_calculated_clock_khz >
+ pll_settings->adjusted_pix_clk)
+ ? actual_calculated_clock_khz -
+ pll_settings->adjusted_pix_clk
+ : pll_settings->adjusted_pix_clk -
+ actual_calculated_clock_khz;
+
+ if (abs_err <= tolerance) {
+ /*found good values*/
+ pll_settings->reference_freq = calc_pll_cs->ref_freq_khz;
+ pll_settings->reference_divider = ref_divider;
+ pll_settings->feedback_divider = feedback_divider;
+ pll_settings->fract_feedback_divider = fract_feedback_divider;
+ pll_settings->pix_clk_post_divider = post_divider;
+ pll_settings->calculated_pix_clk =
+ actual_calculated_clock_khz;
+ pll_settings->vco_freq =
+ actual_calculated_clock_khz * post_divider;
+ return true;
+ }
+ return false;
+}
+
+static bool calc_pll_dividers_in_range(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct pll_settings *pll_settings,
+ uint32_t min_ref_divider,
+ uint32_t max_ref_divider,
+ uint32_t min_post_divider,
+ uint32_t max_post_divider,
+ uint32_t err_tolerance)
+{
+ uint32_t ref_divider;
+ uint32_t post_divider;
+ uint32_t tolerance;
+
+/* err_tolerance is in units of 0.01%: e.g. err_tolerance = 25 means
+ * 25 / 10000 = 0.0025, i.e. an acceptable error of 0.25%, and
+ * err_tolerance = 1 means an acceptable error of 0.01%. */
+ tolerance = (pll_settings->adjusted_pix_clk * err_tolerance) /
+ 10000;
+ if (tolerance < CALC_PLL_CLK_SRC_ERR_TOLERANCE)
+ tolerance = CALC_PLL_CLK_SRC_ERR_TOLERANCE;
+
+ for (
+ post_divider = max_post_divider;
+ post_divider >= min_post_divider;
+ --post_divider) {
+ for (
+ ref_divider = min_ref_divider;
+ ref_divider <= max_ref_divider;
+ ++ref_divider) {
+ if (calc_fb_divider_checking_tolerance(
+ calc_pll_cs,
+ pll_settings,
+ ref_divider,
+ post_divider,
+ tolerance)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static uint32_t calculate_pixel_clock_pll_dividers(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct pll_settings *pll_settings)
+{
+ uint32_t err_tolerance;
+ uint32_t min_post_divider;
+ uint32_t max_post_divider;
+ uint32_t min_ref_divider;
+ uint32_t max_ref_divider;
+
+ if (pll_settings->adjusted_pix_clk == 0) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "%s Bad requested pixel clock", __func__);
+ return MAX_PLL_CALC_ERROR;
+ }
+
+/* 1) Find Post divider ranges */
+ if (pll_settings->pix_clk_post_divider) {
+ min_post_divider = pll_settings->pix_clk_post_divider;
+ max_post_divider = pll_settings->pix_clk_post_divider;
+ } else {
+ min_post_divider = calc_pll_cs->min_pix_clock_pll_post_divider;
+ if (min_post_divider * pll_settings->adjusted_pix_clk <
+ calc_pll_cs->min_vco_khz) {
+ min_post_divider = calc_pll_cs->min_vco_khz /
+ pll_settings->adjusted_pix_clk;
+ if ((min_post_divider *
+ pll_settings->adjusted_pix_clk) <
+ calc_pll_cs->min_vco_khz)
+ min_post_divider++;
+ }
+
+ max_post_divider = calc_pll_cs->max_pix_clock_pll_post_divider;
+ if (max_post_divider * pll_settings->adjusted_pix_clk
+ > calc_pll_cs->max_vco_khz)
+ max_post_divider = calc_pll_cs->max_vco_khz /
+ pll_settings->adjusted_pix_clk;
+ }
+
+/* 2) Find Reference divider ranges
+ * When SS is enabled, or for Display Port even without SS,
+ * pll_settings->referenceDivider is not zero.
+ * So calculate PPLL FB and fractional FB divider
+ * using the passed reference divider*/
+
+ if (pll_settings->reference_divider) {
+ min_ref_divider = pll_settings->reference_divider;
+ max_ref_divider = pll_settings->reference_divider;
+ } else {
+ min_ref_divider = ((calc_pll_cs->ref_freq_khz
+ / calc_pll_cs->max_pll_input_freq_khz)
+ > calc_pll_cs->min_pll_ref_divider)
+ ? calc_pll_cs->ref_freq_khz
+ / calc_pll_cs->max_pll_input_freq_khz
+ : calc_pll_cs->min_pll_ref_divider;
+
+ max_ref_divider = ((calc_pll_cs->ref_freq_khz
+ / calc_pll_cs->min_pll_input_freq_khz)
+ < calc_pll_cs->max_pll_ref_divider)
+ ? calc_pll_cs->ref_freq_khz /
+ calc_pll_cs->min_pll_input_freq_khz
+ : calc_pll_cs->max_pll_ref_divider;
+ }
+
+/* If some parameters are invalid we could end up with "min" > "max",
+ * which would produce an endless loop later.
+ * We should investigate why we get the wrong parameters.
+ * But, following the same logic as the "adjustedPixelClock == 0" case,
+ * it is better to return here than cause a system hang/watchdog timeout later.
+ * ## SVS Wed 15 Jul 2009 */
+
+ if (min_post_divider > max_post_divider) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "%s Post divider range is invalid", __func__);
+ return MAX_PLL_CALC_ERROR;
+ }
+
+ if (min_ref_divider > max_ref_divider) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "%s Reference divider range is invalid", __func__);
+ return MAX_PLL_CALC_ERROR;
+ }
+
+/* 3) Try to find PLL dividers given ranges
+ * starting with minimal error tolerance.
+ * Increase error tolerance until PLL dividers found*/
+ err_tolerance = MAX_PLL_CALC_ERROR;
+
+ while (!calc_pll_dividers_in_range(
+ calc_pll_cs,
+ pll_settings,
+ min_ref_divider,
+ max_ref_divider,
+ min_post_divider,
+ max_post_divider,
+ err_tolerance))
+ err_tolerance += (err_tolerance > 10)
+ ? (err_tolerance / 10)
+ : 1;
+
+ return err_tolerance;
+}
+
+static bool pll_adjust_pix_clk(
+ struct dce110_clk_src *clk_src,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t actual_pix_clk_khz = 0;
+ uint32_t requested_clk_khz = 0;
+ struct bp_adjust_pixel_clock_parameters bp_adjust_pixel_clock_params = {
+ 0 };
+ enum bp_result bp_result;
+ switch (pix_clk_params->signal_type) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ requested_clk_khz = pix_clk_params->requested_pix_clk;
+ if (pix_clk_params->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ requested_clk_khz = (requested_clk_khz * 5) >> 2;
+ break; /* x1.25*/
+ case COLOR_DEPTH_121212:
+ requested_clk_khz = (requested_clk_khz * 6) >> 2;
+ break; /* x1.5*/
+ case COLOR_DEPTH_161616:
+ requested_clk_khz = requested_clk_khz * 2;
+ break; /* x2.0*/
+ default:
+ break;
+ }
+ }
+ actual_pix_clk_khz = requested_clk_khz;
+ }
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ requested_clk_khz = pix_clk_params->requested_sym_clk;
+ actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
+ break;
+
+ default:
+ requested_clk_khz = pix_clk_params->requested_pix_clk;
+ actual_pix_clk_khz = pix_clk_params->requested_pix_clk;
+ break;
+ }
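+
+ /* Illustrative arithmetic: for HDMI at 10 bpc the requested clock is
+ * scaled by 5/4, e.g. 148500 kHz -> (148500 * 5) >> 2 = 185625 kHz. */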
+
+ bp_adjust_pixel_clock_params.pixel_clock = requested_clk_khz;
+ bp_adjust_pixel_clock_params.
+ encoder_object_id = pix_clk_params->encoder_object_id;
+ bp_adjust_pixel_clock_params.signal_type = pix_clk_params->signal_type;
+ bp_adjust_pixel_clock_params.
+ ss_enable = pix_clk_params->flags.ENABLE_SS;
+ bp_result = clk_src->bios->funcs->adjust_pixel_clock(
+ clk_src->bios, &bp_adjust_pixel_clock_params);
+ if (bp_result == BP_RESULT_OK) {
+ pll_settings->actual_pix_clk = actual_pix_clk_khz;
+ pll_settings->adjusted_pix_clk =
+ bp_adjust_pixel_clock_params.adjusted_pixel_clock;
+ pll_settings->reference_divider =
+ bp_adjust_pixel_clock_params.reference_divider;
+ pll_settings->pix_clk_post_divider =
+ bp_adjust_pixel_clock_params.pixel_clock_post_divider;
+
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * Calculate PLL Dividers for given Clock Value.
+ * First will call VBIOS Adjust Exec table to check if requested Pixel clock
+ * will be Adjusted based on usage.
+ * Then it will calculate PLL Dividers for this Adjusted clock using preferred
+ * method (Maximum VCO frequency).
+ *
+ * \return
+ * Calculation error in units of 0.01%
+ */
+
+static uint32_t dce110_get_pix_clk_dividers_helper(
+ struct dce110_clk_src *clk_src,
+ struct pll_settings *pll_settings,
+ struct pixel_clk_params *pix_clk_params)
+{
+ uint32_t field = 0;
+ uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+
+ /* Check if reference clock is external (not pcie/xtalin)
+ * HW Dce80 spec:
+ * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB
+ * 04 - HSYNCA, 05 - GENLK_CLK, 06 - PCIE_REFCLK, 07 - DVOCLK0 */
+ REG_GET(PLL_CNTL, PLL_REF_DIV_SRC, &field);
+ pll_settings->use_external_clk = (field > 1);
+
+ /* VBIOS by default enables DP SS (spread on IDCLK) for DCE 8.0 always
+ * (we do not care any more from SI for some older DP Sink which
+ * does not report SS support, no known issues) */
+ if ((pix_clk_params->flags.ENABLE_SS) ||
+ (dc_is_dp_signal(pix_clk_params->signal_type))) {
+
+ const struct spread_spectrum_data *ss_data = get_ss_data_entry(
+ clk_src,
+ pix_clk_params->signal_type,
+ pll_settings->adjusted_pix_clk);
+
+ if (NULL != ss_data)
+ pll_settings->ss_percentage = ss_data->percentage;
+ }
+
+ /* Check VBIOS AdjustPixelClock Exec table */
+ if (!pll_adjust_pix_clk(clk_src, pix_clk_params, pll_settings)) {
+ /* Should never happen, ASSERT and fill up values to be able
+ * to continue. */
+ dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+ "%s: Failed to adjust pixel clock!!", __func__);
+ pll_settings->actual_pix_clk =
+ pix_clk_params->requested_pix_clk;
+ pll_settings->adjusted_pix_clk =
+ pix_clk_params->requested_pix_clk;
+
+ if (dc_is_dp_signal(pix_clk_params->signal_type))
+ pll_settings->adjusted_pix_clk = 100000;
+ }
+
+ /* Calculate Dividers */
+ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
+ /*Calculate Dividers by HDMI object, no SS case or SS case */
+ pll_calc_error =
+ calculate_pixel_clock_pll_dividers(
+ &clk_src->calc_pll_hdmi,
+ pll_settings);
+ else
+ /*Calculate Dividers by default object, no SS case or SS case */
+ pll_calc_error =
+ calculate_pixel_clock_pll_dividers(
+ &clk_src->calc_pll,
+ pll_settings);
+
+ return pll_calc_error;
+}
+
+static void dce112_get_pix_clk_dividers_helper(
+ struct dce110_clk_src *clk_src,
+ struct pll_settings *pll_settings,
+ struct pixel_clk_params *pix_clk_params)
+{
+ uint32_t actualPixelClockInKHz;
+
+ actualPixelClockInKHz = pix_clk_params->requested_pix_clk;
+ /* Calculate Dividers */
+ if (pix_clk_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (pix_clk_params->color_depth) {
+ case COLOR_DEPTH_101010:
+ actualPixelClockInKHz = (actualPixelClockInKHz * 5) >> 2;
+ break;
+ case COLOR_DEPTH_121212:
+ actualPixelClockInKHz = (actualPixelClockInKHz * 6) >> 2;
+ break;
+ case COLOR_DEPTH_161616:
+ actualPixelClockInKHz = actualPixelClockInKHz * 2;
+ break;
+ default:
+ break;
+ }
+ }
+ pll_settings->actual_pix_clk = actualPixelClockInKHz;
+ pll_settings->adjusted_pix_clk = actualPixelClockInKHz;
+ pll_settings->calculated_pix_clk = pix_clk_params->requested_pix_clk;
+}
+
+static uint32_t dce110_get_pix_clk_dividers(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+ uint32_t pll_calc_error = MAX_PLL_CALC_ERROR;
+
+ if (pix_clk_params == NULL || pll_settings == NULL
+ || pix_clk_params->requested_pix_clk == 0) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_ERROR,
+ "%s: Invalid parameters!!\n", __func__);
+ return pll_calc_error;
+ }
+
+ memset(pll_settings, 0, sizeof(*pll_settings));
+
+ if (cs->id == CLOCK_SOURCE_ID_DP_DTO ||
+ cs->id == CLOCK_SOURCE_ID_EXTERNAL) {
+ pll_settings->adjusted_pix_clk = clk_src->ext_clk_khz;
+ pll_settings->calculated_pix_clk = clk_src->ext_clk_khz;
+ pll_settings->actual_pix_clk =
+ pix_clk_params->requested_pix_clk;
+ return 0;
+ }
+
+ switch (cs->ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ pll_calc_error =
+ dce110_get_pix_clk_dividers_helper(clk_src,
+ pll_settings, pix_clk_params);
+ break;
+ case DCE_VERSION_11_2:
+ case DCE_VERSION_12_0:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+#endif
+
+ dce112_get_pix_clk_dividers_helper(clk_src,
+ pll_settings, pix_clk_params);
+ break;
+ default:
+ break;
+ }
+
+ return pll_calc_error;
+}
+
+static uint32_t dce110_get_pll_pixel_rate_in_hz(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ struct dc *dc_core = cs->ctx->dc;
+ struct dc_state *context = dc_core->current_state;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst];
+
+ /* This function needs to be split per DCE version; until then, just use the pixel clock */
+ return pipe_ctx->stream->phy_pix_clk;
+
+}
+
+static uint32_t dce110_get_dp_pixel_rate_from_combo_phy_pll(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ struct dc *dc_core = cs->ctx->dc;
+ struct dc_state *context = dc_core->current_state;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[inst];
+
+ /* This function needs to be split per DCE version; until then, just use the pixel clock */
+ return pipe_ctx->stream->phy_pix_clk;
+}
+
+static uint32_t dce110_get_d_to_pixel_rate_in_hz(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs);
+ int dto_enabled = 0;
+ struct fixed31_32 pix_rate;
+
+ REG_GET(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, &dto_enabled);
+
+ if (dto_enabled) {
+ uint32_t phase = 0;
+ uint32_t modulo = 0;
+ REG_GET(PHASE[inst], DP_DTO0_PHASE, &phase);
+ REG_GET(MODULO[inst], DP_DTO0_MODULO, &modulo);
+
+ if (modulo == 0) {
+ return 0;
+ }
+
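+ /* A minimal sketch of the arithmetic below: the DP DTO output in Hz is
+ * ref_freq_khz * 1000 * phase / modulo. For example, with a hypothetical
+ * 600000 kHz reference, phase = 148500 and modulo = 600000 yield
+ * 148500000 Hz (148.5 MHz). */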
+ pix_rate = dal_fixed31_32_from_int(clk_src->ref_freq_khz);
+ pix_rate = dal_fixed31_32_mul_int(pix_rate, 1000);
+ pix_rate = dal_fixed31_32_mul_int(pix_rate, phase);
+ pix_rate = dal_fixed31_32_div_int(pix_rate, modulo);
+
+ return dal_fixed31_32_round(pix_rate);
+ } else {
+ return dce110_get_dp_pixel_rate_from_combo_phy_pll(cs, pix_clk_params, pll_settings);
+ }
+}
+
+static uint32_t dce110_get_pix_rate_in_hz(
+ struct clock_source *cs,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ uint32_t pix_rate = 0;
+ switch (pix_clk_params->signal_type) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_VIRTUAL:
+ pix_rate = dce110_get_d_to_pixel_rate_in_hz(cs, pix_clk_params, pll_settings);
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ default:
+ pix_rate = dce110_get_pll_pixel_rate_in_hz(cs, pix_clk_params, pll_settings);
+ break;
+ }
+
+ return pix_rate;
+}
+
+static bool disable_spread_spectrum(struct dce110_clk_src *clk_src)
+{
+ enum bp_result result;
+ struct bp_spread_spectrum_parameters bp_ss_params = {0};
+
+ bp_ss_params.pll_id = clk_src->base.id;
+
+ /*Call ASICControl to process ATOMBIOS Exec table*/
+ result = clk_src->bios->funcs->enable_spread_spectrum_on_ppll(
+ clk_src->bios,
+ &bp_ss_params,
+ false);
+
+ return result == BP_RESULT_OK;
+}
+
+static bool calculate_ss(
+ const struct pll_settings *pll_settings,
+ const struct spread_spectrum_data *ss_data,
+ struct delta_sigma_data *ds_data)
+{
+ struct fixed32_32 fb_div;
+ struct fixed32_32 ss_amount;
+ struct fixed32_32 ss_nslip_amount;
+ struct fixed32_32 ss_ds_frac_amount;
+ struct fixed32_32 ss_step_size;
+ struct fixed32_32 modulation_time;
+
+ if (ds_data == NULL)
+ return false;
+ if (ss_data == NULL)
+ return false;
+ if (ss_data->percentage == 0)
+ return false;
+ if (pll_settings == NULL)
+ return false;
+
+ memset(ds_data, 0, sizeof(struct delta_sigma_data));
+
+ /* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC*/
+ /* 6 decimal point support in fractional feedback divider */
+ fb_div = dal_fixed32_32_from_fraction(
+ pll_settings->fract_feedback_divider, 1000000);
+ fb_div = dal_fixed32_32_add_int(fb_div, pll_settings->feedback_divider);
+
+ ds_data->ds_frac_amount = 0;
+ /* spreadSpectrumPercentage is in units of 0.01%,
+ * so it has to be divided by 100 * 100 */
+ ss_amount = dal_fixed32_32_mul(
+ fb_div, dal_fixed32_32_from_fraction(ss_data->percentage,
+ 100 * ss_data->percentage_divider));
+ ds_data->feedback_amount = dal_fixed32_32_floor(ss_amount);
+
+ ss_nslip_amount = dal_fixed32_32_sub(ss_amount,
+ dal_fixed32_32_from_int(ds_data->feedback_amount));
+ ss_nslip_amount = dal_fixed32_32_mul_int(ss_nslip_amount, 10);
+ ds_data->nfrac_amount = dal_fixed32_32_floor(ss_nslip_amount);
+
+ ss_ds_frac_amount = dal_fixed32_32_sub(ss_nslip_amount,
+ dal_fixed32_32_from_int(ds_data->nfrac_amount));
+ ss_ds_frac_amount = dal_fixed32_32_mul_int(ss_ds_frac_amount, 65536);
+ ds_data->ds_frac_amount = dal_fixed32_32_floor(ss_ds_frac_amount);
+
+ /* compute SS_STEP_SIZE_DSFRAC */
+ modulation_time = dal_fixed32_32_from_fraction(
+ pll_settings->reference_freq * 1000,
+ pll_settings->reference_divider * ss_data->modulation_freq_hz);
+
+ if (ss_data->flags.CENTER_SPREAD)
+ modulation_time = dal_fixed32_32_div_int(modulation_time, 4);
+ else
+ modulation_time = dal_fixed32_32_div_int(modulation_time, 2);
+
+ ss_step_size = dal_fixed32_32_div(ss_amount, modulation_time);
+ /* SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/
+ ss_step_size = dal_fixed32_32_mul_int(ss_step_size, 65536 * 10);
+ ds_data->ds_frac_size = dal_fixed32_32_floor(ss_step_size);
+
+ return true;
+}
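+
+/* Illustrative arithmetic for calculate_ss() (values chosen only to exercise
+ * the decomposition above): fb_div = 22.0 with percentage = 50 and
+ * percentage_divider = 100 (i.e. a 0.5% spread) gives
+ * ss_amount = 22 * 50 / (100 * 100) = 0.11, so feedback_amount = 0,
+ * nfrac_amount = floor(0.11 * 10) = 1 and
+ * ds_frac_amount = floor((1.1 - 1) * 65536) = 6553.
+ */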
+
+static bool enable_spread_spectrum(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal, struct pll_settings *pll_settings)
+{
+ struct bp_spread_spectrum_parameters bp_params = {0};
+ struct delta_sigma_data d_s_data;
+ const struct spread_spectrum_data *ss_data = NULL;
+
+ ss_data = get_ss_data_entry(
+ clk_src,
+ signal,
+ pll_settings->calculated_pix_clk);
+
+/* The pixel clock PLL has been programmed to generate the desired pixel clock;
+ * now enable SS on the pixel clock. */
+/* TODO: is it OK to return true without doing anything? */
+ if (ss_data != NULL && pll_settings->ss_percentage != 0) {
+ if (calculate_ss(pll_settings, ss_data, &d_s_data)) {
+ bp_params.ds.feedback_amount =
+ d_s_data.feedback_amount;
+ bp_params.ds.nfrac_amount =
+ d_s_data.nfrac_amount;
+ bp_params.ds.ds_frac_size = d_s_data.ds_frac_size;
+ bp_params.ds_frac_amount =
+ d_s_data.ds_frac_amount;
+ bp_params.flags.DS_TYPE = 1;
+ bp_params.pll_id = clk_src->base.id;
+ bp_params.percentage = ss_data->percentage;
+ if (ss_data->flags.CENTER_SPREAD)
+ bp_params.flags.CENTER_SPREAD = 1;
+ if (ss_data->flags.EXTERNAL_SS)
+ bp_params.flags.EXTERNAL_SS = 1;
+
+ if (BP_RESULT_OK !=
+ clk_src->bios->funcs->
+ enable_spread_spectrum_on_ppll(
+ clk_src->bios,
+ &bp_params,
+ true))
+ return false;
+ } else
+ return false;
+ }
+ return true;
+}
+
+static void dce110_program_pixel_clk_resync(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal_type,
+ enum dc_color_depth colordepth)
+{
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 0);
+ /*
+ 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1)
+ 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
+ 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2)
+ 48 bit mode: TMDS clock = 2 x pixel clock (2:1)
+ */
+ if (signal_type != SIGNAL_TYPE_HDMI_TYPE_A)
+ return;
+
+ switch (colordepth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 0);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 1);
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 2);
+ break;
+ case COLOR_DEPTH_161616:
+ REG_UPDATE(RESYNC_CNTL,
+ DCCG_DEEP_COLOR_CNTL1, 3);
+ break;
+ default:
+ break;
+ }
+}
+
+static void dce112_program_pixel_clk_resync(
+ struct dce110_clk_src *clk_src,
+ enum signal_type signal_type,
+ enum dc_color_depth colordepth,
+ bool enable_ycbcr420)
+{
+ uint32_t deep_color_cntl = 0;
+ uint32_t double_rate_enable = 0;
+
+ /*
+ 24 bit mode: TMDS clock = 1.0 x pixel clock (1:1)
+ 30 bit mode: TMDS clock = 1.25 x pixel clock (5:4)
+ 36 bit mode: TMDS clock = 1.5 x pixel clock (3:2)
+ 48 bit mode: TMDS clock = 2 x pixel clock (2:1)
+ */
+ if (signal_type == SIGNAL_TYPE_HDMI_TYPE_A) {
+ double_rate_enable = enable_ycbcr420 ? 1 : 0;
+
+ switch (colordepth) {
+ case COLOR_DEPTH_888:
+ deep_color_cntl = 0;
+ break;
+ case COLOR_DEPTH_101010:
+ deep_color_cntl = 1;
+ break;
+ case COLOR_DEPTH_121212:
+ deep_color_cntl = 2;
+ break;
+ case COLOR_DEPTH_161616:
+ deep_color_cntl = 3;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (clk_src->cs_mask->PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE)
+ REG_UPDATE_2(PIXCLK_RESYNC_CNTL,
+ PHYPLLA_DCCG_DEEP_COLOR_CNTL, deep_color_cntl,
+ PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, double_rate_enable);
+ else
+ REG_UPDATE(PIXCLK_RESYNC_CNTL,
+ PHYPLLA_DCCG_DEEP_COLOR_CNTL, deep_color_cntl);
+
+}
+
+static bool dce110_program_pix_clk(
+ struct clock_source *clock_source,
+ struct pixel_clk_params *pix_clk_params,
+ struct pll_settings *pll_settings)
+{
+ struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
+ struct bp_pixel_clock_parameters bp_pc_params = {0};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
+ unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
+ unsigned dp_dto_ref_kHz = 600000;
+ /* DPREF clock from FPGA TODO: Does FPGA have this value? */
+ unsigned clock_kHz = pll_settings->actual_pix_clk;
+
+ /* For faster simulation, if the mode's pixel clock is less than 290 MHz,
+ * the pixel clock can be hard coded to 290 MHz. For 4K modes the pixel
+ * clock is greater than 500 MHz, so the real pixel clock is needed:
+ * clock_kHz = 290000;
+ */
+ /* TODO: un-hardcode when we can set display clock properly*/
+ /*clock_kHz = pix_clk_params->requested_pix_clk;*/
+ clock_kHz = 290000;
+
+ /* Set DTO values: phase = target clock, modulo = reference clock */
+ REG_WRITE(PHASE[inst], clock_kHz);
+ REG_WRITE(MODULO[inst], dp_dto_ref_kHz);
+
+ /* Enable DTO */
+ REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
+ return true;
+ }
+#endif
+ /* First disable SS.
+ * ATOMBIOS enables SS on the PLL for DP by default,
+ * so do not disable it here.
+ */
+ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL &&
+ !dc_is_dp_signal(pix_clk_params->signal_type) &&
+ clock_source->ctx->dce_version <= DCE_VERSION_11_0)
+ disable_spread_spectrum(clk_src);
+
+ /* ATOMBIOS expects the pixel rate adjusted by the deep color ratio */
+ bp_pc_params.controller_id = pix_clk_params->controller_id;
+ bp_pc_params.pll_id = clock_source->id;
+ bp_pc_params.target_pixel_clock = pll_settings->actual_pix_clk;
+ bp_pc_params.encoder_object_id = pix_clk_params->encoder_object_id;
+ bp_pc_params.signal_type = pix_clk_params->signal_type;
+
+ switch (clock_source->ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ bp_pc_params.reference_divider = pll_settings->reference_divider;
+ bp_pc_params.feedback_divider = pll_settings->feedback_divider;
+ bp_pc_params.fractional_feedback_divider =
+ pll_settings->fract_feedback_divider;
+ bp_pc_params.pixel_clock_post_divider =
+ pll_settings->pix_clk_post_divider;
+ bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+
+ if (clk_src->bios->funcs->set_pixel_clock(
+ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ return false;
+ /* Enable SS.
+ * ATOMBIOS enables SS for DP on the PLL (DP ID clock) by default;
+ * per the HW display PLL team, SS control settings should be programmed
+ * during PLL reset, but they do not take effect
+ * until SS_EN is asserted. */
+ if (clock_source->id != CLOCK_SOURCE_ID_EXTERNAL
+ && !dc_is_dp_signal(pix_clk_params->signal_type)) {
+
+ if (pix_clk_params->flags.ENABLE_SS)
+ if (!enable_spread_spectrum(clk_src,
+ pix_clk_params->signal_type,
+ pll_settings))
+ return false;
+
+ /* Resync deep color DTO */
+ dce110_program_pixel_clk_resync(clk_src,
+ pix_clk_params->signal_type,
+ pix_clk_params->color_depth);
+ }
+
+ break;
+ case DCE_VERSION_11_2:
+ case DCE_VERSION_12_0:
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+#endif
+
+ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO) {
+ bp_pc_params.flags.SET_GENLOCK_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+ bp_pc_params.flags.SET_XTALIN_REF_SRC =
+ !pll_settings->use_external_clk;
+ if (pix_clk_params->flags.SUPPORT_YCBCR420) {
+ bp_pc_params.flags.SUPPORT_YUV_420 = 1;
+ }
+ }
+ if (clk_src->bios->funcs->set_pixel_clock(
+ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ return false;
+ /* Resync deep color DTO */
+ if (clock_source->id != CLOCK_SOURCE_ID_DP_DTO)
+ dce112_program_pixel_clk_resync(clk_src,
+ pix_clk_params->signal_type,
+ pix_clk_params->color_depth,
+ pix_clk_params->flags.SUPPORT_YCBCR420);
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool dce110_clock_source_power_down(
+ struct clock_source *clk_src)
+{
+ struct dce110_clk_src *dce110_clk_src = TO_DCE110_CLK_SRC(clk_src);
+ enum bp_result bp_result;
+ struct bp_pixel_clock_parameters bp_pixel_clock_params = {0};
+
+ if (clk_src->dp_clk_src)
+ return true;
+
+ /* If the pixel clock is 0 it means power down the PLL */
+ bp_pixel_clock_params.controller_id = CONTROLLER_ID_UNDEFINED;
+ bp_pixel_clock_params.pll_id = clk_src->id;
+ bp_pixel_clock_params.flags.FORCE_PROGRAMMING_OF_PLL = 1;
+
+ /*Call ASICControl to process ATOMBIOS Exec table*/
+ bp_result = dce110_clk_src->bios->funcs->set_pixel_clock(
+ dce110_clk_src->bios,
+ &bp_pixel_clock_params);
+
+ return bp_result == BP_RESULT_OK;
+}
+
+/*****************************************/
+/* Constructor */
+/*****************************************/
+static const struct clock_source_funcs dce110_clk_src_funcs = {
+ .cs_power_down = dce110_clock_source_power_down,
+ .program_pix_clk = dce110_program_pix_clk,
+ .get_pix_clk_dividers = dce110_get_pix_clk_dividers,
+ .get_pix_rate_in_hz = dce110_get_pix_rate_in_hz
+};
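+
+/* Note on ordering: program_pix_clk() consumes the pll_settings that
+ * get_pix_clk_dividers() fills in (actual_pix_clk, reference_divider, etc.),
+ * so callers are expected to invoke them in that order; cs_power_down()
+ * powers the PLL down through the VBIOS SetPixelClock table. */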
+
+static void get_ss_info_from_atombios(
+ struct dce110_clk_src *clk_src,
+ enum as_signal_type as_signal,
+ struct spread_spectrum_data *spread_spectrum_data[],
+ uint32_t *ss_entries_num)
+{
+ enum bp_result bp_result = BP_RESULT_FAILURE;
+ struct spread_spectrum_info *ss_info;
+ struct spread_spectrum_data *ss_data;
+ struct spread_spectrum_info *ss_info_cur;
+ struct spread_spectrum_data *ss_data_cur;
+ uint32_t i;
+
+ if (ss_entries_num == NULL) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid entry !!!\n");
+ return;
+ }
+ if (spread_spectrum_data == NULL) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid array pointer!!!\n");
+ return;
+ }
+
+ spread_spectrum_data[0] = NULL;
+ *ss_entries_num = 0;
+
+ *ss_entries_num = clk_src->bios->funcs->get_ss_entry_number(
+ clk_src->bios,
+ as_signal);
+
+ if (*ss_entries_num == 0)
+ return;
+
+ ss_info = kzalloc(sizeof(struct spread_spectrum_info) * (*ss_entries_num),
+ GFP_KERNEL);
+ ss_info_cur = ss_info;
+ if (ss_info == NULL)
+ return;
+
+ ss_data = kzalloc(sizeof(struct spread_spectrum_data) * (*ss_entries_num),
+ GFP_KERNEL);
+ if (ss_data == NULL)
+ goto out_free_info;
+
+ for (i = 0, ss_info_cur = ss_info;
+ i < (*ss_entries_num);
+ ++i, ++ss_info_cur) {
+
+ bp_result = clk_src->bios->funcs->get_spread_spectrum_info(
+ clk_src->bios,
+ as_signal,
+ i,
+ ss_info_cur);
+
+ if (bp_result != BP_RESULT_OK)
+ goto out_free_data;
+ }
+
+ for (i = 0, ss_info_cur = ss_info, ss_data_cur = ss_data;
+ i < (*ss_entries_num);
+ ++i, ++ss_info_cur, ++ss_data_cur) {
+
+ if (ss_info_cur->type.STEP_AND_DELAY_INFO != false) {
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid ATOMBIOS SS Table!!!\n");
+ goto out_free_data;
+ }
+
+ /* for HDMI, check the SS percentage;
+ * if it is > 6 (0.06%), the ATOMBIOS table info is invalid */
+ if (as_signal == AS_SIGNAL_TYPE_HDMI
+ && ss_info_cur->spread_spectrum_percentage > 6){
+ /* invalid input, do nothing */
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "Invalid SS percentage ");
+ dm_logger_write(clk_src->base.ctx->logger, LOG_SYNC,
+ "for HDMI in ATOMBIOS info Table!!!\n");
+ continue;
+ }
+ if (ss_info_cur->spread_percentage_divider == 1000) {
+ /* Keep the previous ATOMBIOS precision in case ATOMBIOS
+ * now reports these entries with a new precision
+ * (otherwise all code in the DCE specific classes
+ * for all previous ASICs would need
+ * to be updated for SS calculations,
+ * Audio SS compensation and DP DTO SS compensation,
+ * which assume a fixed SS percentage divider of 100) */
+ ss_info_cur->spread_spectrum_percentage /= 10;
+ ss_info_cur->spread_percentage_divider = 100;
+ }
+
+ ss_data_cur->freq_range_khz = ss_info_cur->target_clock_range;
+ ss_data_cur->percentage =
+ ss_info_cur->spread_spectrum_percentage;
+ ss_data_cur->percentage_divider =
+ ss_info_cur->spread_percentage_divider;
+ ss_data_cur->modulation_freq_hz =
+ ss_info_cur->spread_spectrum_range;
+
+ if (ss_info_cur->type.CENTER_MODE)
+ ss_data_cur->flags.CENTER_SPREAD = 1;
+
+ if (ss_info_cur->type.EXTERNAL)
+ ss_data_cur->flags.EXTERNAL_SS = 1;
+
+ }
+
+ *spread_spectrum_data = ss_data;
+ kfree(ss_info);
+ return;
+
+out_free_data:
+ kfree(ss_data);
+ *ss_entries_num = 0;
+out_free_info:
+ kfree(ss_info);
+}
+
+static void ss_info_from_atombios_create(
+ struct dce110_clk_src *clk_src)
+{
+ get_ss_info_from_atombios(
+ clk_src,
+ AS_SIGNAL_TYPE_DISPLAY_PORT,
+ &clk_src->dp_ss_params,
+ &clk_src->dp_ss_params_cnt);
+ get_ss_info_from_atombios(
+ clk_src,
+ AS_SIGNAL_TYPE_HDMI,
+ &clk_src->hdmi_ss_params,
+ &clk_src->hdmi_ss_params_cnt);
+ get_ss_info_from_atombios(
+ clk_src,
+ AS_SIGNAL_TYPE_DVI,
+ &clk_src->dvi_ss_params,
+ &clk_src->dvi_ss_params_cnt);
+}
+
+static bool calc_pll_max_vco_construct(
+ struct calc_pll_clock_source *calc_pll_cs,
+ struct calc_pll_clock_source_init_data *init_data)
+{
+ uint32_t i;
+ struct dc_firmware_info fw_info = { { 0 } };
+ if (calc_pll_cs == NULL ||
+ init_data == NULL ||
+ init_data->bp == NULL)
+ return false;
+
+ if (init_data->bp->funcs->get_firmware_info(
+ init_data->bp,
+ &fw_info) != BP_RESULT_OK)
+ return false;
+
+ calc_pll_cs->ctx = init_data->ctx;
+ calc_pll_cs->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+ calc_pll_cs->min_vco_khz =
+ fw_info.pll_info.min_output_pxl_clk_pll_frequency;
+ calc_pll_cs->max_vco_khz =
+ fw_info.pll_info.max_output_pxl_clk_pll_frequency;
+
+ if (init_data->max_override_input_pxl_clk_pll_freq_khz != 0)
+ calc_pll_cs->max_pll_input_freq_khz =
+ init_data->max_override_input_pxl_clk_pll_freq_khz;
+ else
+ calc_pll_cs->max_pll_input_freq_khz =
+ fw_info.pll_info.max_input_pxl_clk_pll_frequency;
+
+ if (init_data->min_override_input_pxl_clk_pll_freq_khz != 0)
+ calc_pll_cs->min_pll_input_freq_khz =
+ init_data->min_override_input_pxl_clk_pll_freq_khz;
+ else
+ calc_pll_cs->min_pll_input_freq_khz =
+ fw_info.pll_info.min_input_pxl_clk_pll_frequency;
+
+ calc_pll_cs->min_pix_clock_pll_post_divider =
+ init_data->min_pix_clk_pll_post_divider;
+ calc_pll_cs->max_pix_clock_pll_post_divider =
+ init_data->max_pix_clk_pll_post_divider;
+ calc_pll_cs->min_pll_ref_divider =
+ init_data->min_pll_ref_divider;
+ calc_pll_cs->max_pll_ref_divider =
+ init_data->max_pll_ref_divider;
+
+ if (init_data->num_fract_fb_divider_decimal_point == 0 ||
+ init_data->num_fract_fb_divider_decimal_point_precision >
+ init_data->num_fract_fb_divider_decimal_point) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "The dec point num or precision is incorrect!");
+ return false;
+ }
+ if (init_data->num_fract_fb_divider_decimal_point_precision == 0) {
+ dm_logger_write(calc_pll_cs->ctx->logger, LOG_ERROR,
+ "Incorrect fract feedback divider precision num!");
+ return false;
+ }
+
+ calc_pll_cs->fract_fb_divider_decimal_points_num =
+ init_data->num_fract_fb_divider_decimal_point;
+ calc_pll_cs->fract_fb_divider_precision =
+ init_data->num_fract_fb_divider_decimal_point_precision;
+ calc_pll_cs->fract_fb_divider_factor = 1;
+ for (i = 0; i < calc_pll_cs->fract_fb_divider_decimal_points_num; ++i)
+ calc_pll_cs->fract_fb_divider_factor *= 10;
+
+ calc_pll_cs->fract_fb_divider_precision_factor = 1;
+ for (
+ i = 0;
+ i < (calc_pll_cs->fract_fb_divider_decimal_points_num -
+ calc_pll_cs->fract_fb_divider_precision);
+ ++i)
+ calc_pll_cs->fract_fb_divider_precision_factor *= 10;
+
+ return true;
+}
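
The two loops at the end compute powers of ten. In this patch both the decimal-point count and the precision are set to FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM, so the precision factor degenerates to 1; the values below are hypothetical, chosen only to show the general case:

	unsigned int points = 6, precision = 4, i;	/* illustrative values */
	unsigned int factor = 1, precision_factor = 1;

	for (i = 0; i < points; ++i)
		factor *= 10;			/* 10^6 = 1000000 */
	for (i = 0; i < points - precision; ++i)
		precision_factor *= 10;		/* 10^2 = 100 */
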
+
+bool dce110_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask)
+{
+ struct dc_firmware_info fw_info = { { 0 } };
+ struct calc_pll_clock_source_init_data calc_pll_cs_init_data_hdmi;
+ struct calc_pll_clock_source_init_data calc_pll_cs_init_data;
+
+ clk_src->base.ctx = ctx;
+ clk_src->bios = bios;
+ clk_src->base.id = id;
+ clk_src->base.funcs = &dce110_clk_src_funcs;
+
+ clk_src->regs = regs;
+ clk_src->cs_shift = cs_shift;
+ clk_src->cs_mask = cs_mask;
+
+ if (clk_src->bios->funcs->get_firmware_info(
+ clk_src->bios, &fw_info) != BP_RESULT_OK) {
+ ASSERT_CRITICAL(false);
+ goto unexpected_failure;
+ }
+
+ clk_src->ext_clk_khz =
+ fw_info.external_clock_source_frequency_for_dp;
+
+ switch (clk_src->base.ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+
+ /* structure normally used with PLL ranges from ATOMBIOS; DS on by default */
+ calc_pll_cs_init_data.bp = bios;
+ calc_pll_cs_init_data.min_pix_clk_pll_post_divider = 1;
+ calc_pll_cs_init_data.max_pix_clk_pll_post_divider =
+ clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+ calc_pll_cs_init_data.min_pll_ref_divider = 1;
+ calc_pll_cs_init_data.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+ /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data.min_override_input_pxl_clk_pll_freq_khz = 0;
+ /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data.max_override_input_pxl_clk_pll_freq_khz = 0;
+ /*numberOfFractFBDividerDecimalPoints*/
+ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+		/* number of decimal points to which the fractional feedback
+		 * divider value is rounded */
+ calc_pll_cs_init_data.num_fract_fb_divider_decimal_point_precision =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ calc_pll_cs_init_data.ctx = ctx;
+
+		/* structure for HDMI: either no SS, or SS% <= 0.06% with a
+		 * 27 MHz ref clock */
+ calc_pll_cs_init_data_hdmi.bp = bios;
+ calc_pll_cs_init_data_hdmi.min_pix_clk_pll_post_divider = 1;
+ calc_pll_cs_init_data_hdmi.max_pix_clk_pll_post_divider =
+ clk_src->cs_mask->PLL_POST_DIV_PIXCLK;
+ calc_pll_cs_init_data_hdmi.min_pll_ref_divider = 1;
+ calc_pll_cs_init_data_hdmi.max_pll_ref_divider = clk_src->cs_mask->PLL_REF_DIV;
+ /* when 0 use minInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data_hdmi.min_override_input_pxl_clk_pll_freq_khz = 13500;
+ /* when 0 use maxInputPxlClkPLLFrequencyInKHz from firmwareInfo*/
+ calc_pll_cs_init_data_hdmi.max_override_input_pxl_clk_pll_freq_khz = 27000;
+ /*numberOfFractFBDividerDecimalPoints*/
+ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+		/* number of decimal points to which the fractional feedback
+		 * divider value is rounded */
+ calc_pll_cs_init_data_hdmi.num_fract_fb_divider_decimal_point_precision =
+ FRACT_FB_DIVIDER_DEC_POINTS_MAX_NUM;
+ calc_pll_cs_init_data_hdmi.ctx = ctx;
+
+ clk_src->ref_freq_khz = fw_info.pll_info.crystal_frequency;
+
+ if (clk_src->base.id == CLOCK_SOURCE_ID_EXTERNAL)
+ return true;
+
+ /* PLL only from here on */
+ ss_info_from_atombios_create(clk_src);
+
+ if (!calc_pll_max_vco_construct(
+ &clk_src->calc_pll,
+ &calc_pll_cs_init_data)) {
+ ASSERT_CRITICAL(false);
+ goto unexpected_failure;
+ }
+
+ calc_pll_cs_init_data_hdmi.
+ min_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz/2;
+ calc_pll_cs_init_data_hdmi.
+ max_override_input_pxl_clk_pll_freq_khz = clk_src->ref_freq_khz;
+
+ if (!calc_pll_max_vco_construct(
+ &clk_src->calc_pll_hdmi, &calc_pll_cs_init_data_hdmi)) {
+ ASSERT_CRITICAL(false);
+ goto unexpected_failure;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+
+unexpected_failure:
+ return false;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
new file mode 100644
index 000000000000..c45e2f76189e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -0,0 +1,145 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_CLOCK_SOURCE_DCE_H__
+#define __DC_CLOCK_SOURCE_DCE_H__
+
+#include "../inc/clock_source.h"
+
+#define TO_DCE110_CLK_SRC(clk_src)\
+ container_of(clk_src, struct dce110_clk_src, base)
+
+#define CS_COMMON_REG_LIST_DCE_100_110(id) \
+ SRI(RESYNC_CNTL, PIXCLK, id), \
+ SRI(PLL_CNTL, BPHYC_PLL, id)
+
+#define CS_COMMON_REG_LIST_DCE_80(id) \
+ SRI(RESYNC_CNTL, PIXCLK, id), \
+ SRI(PLL_CNTL, DCCG_PLL, id)
+
+#define CS_COMMON_REG_LIST_DCE_112(id) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, id)
+
+
+#define CS_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ CS_SF(PLL_CNTL, PLL_REF_DIV_SRC, mask_sh),\
+ CS_SF(PIXCLK1_RESYNC_CNTL, DCCG_DEEP_COLOR_CNTL1, mask_sh),\
+ CS_SF(PLL_POST_DIV, PLL_POST_DIV_PIXCLK, mask_sh),\
+ CS_SF(PLL_REF_DIV, PLL_REF_DIV, mask_sh)
+
+#define CS_COMMON_MASK_SH_LIST_DCE_112(mask_sh)\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+
+#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
+ SRII(PHASE, DP_DTO, 0),\
+ SRII(PHASE, DP_DTO, 1),\
+ SRII(PHASE, DP_DTO, 2),\
+ SRII(PHASE, DP_DTO, 3),\
+ SRII(MODULO, DP_DTO, 0),\
+ SRII(MODULO, DP_DTO, 1),\
+ SRII(MODULO, DP_DTO, 2),\
+ SRII(MODULO, DP_DTO, 3),\
+ SRII(PIXEL_RATE_CNTL, OTG, 0), \
+ SRII(PIXEL_RATE_CNTL, OTG, 1), \
+ SRII(PIXEL_RATE_CNTL, OTG, 2), \
+ SRII(PIXEL_RATE_CNTL, OTG, 3)
+
+#define CS_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+ CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
+ CS_SF(DP_DTO0_MODULO, DP_DTO0_MODULO, mask_sh),\
+ CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
+ CS_SF(OTG0_PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
+
+#endif
+
+#define CS_REG_FIELD_LIST(type) \
+ type PLL_REF_DIV_SRC; \
+ type DCCG_DEEP_COLOR_CNTL1; \
+ type PHYPLLA_DCCG_DEEP_COLOR_CNTL; \
+ type PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE; \
+ type PLL_POST_DIV_PIXCLK; \
+ type PLL_REF_DIV; \
+ type DP_DTO0_PHASE; \
+ type DP_DTO0_MODULO; \
+ type DP_DTO0_ENABLE;
+
+struct dce110_clk_src_shift {
+ CS_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce110_clk_src_mask {
+ CS_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce110_clk_src_regs {
+ uint32_t RESYNC_CNTL;
+ uint32_t PIXCLK_RESYNC_CNTL;
+ uint32_t PLL_CNTL;
+
+ /* below are for DTO.
+ * todo: should probably use different struct to not waste space
+ */
+ uint32_t PHASE[MAX_PIPES];
+ uint32_t MODULO[MAX_PIPES];
+ uint32_t PIXEL_RATE_CNTL[MAX_PIPES];
+};
+
+struct dce110_clk_src {
+ struct clock_source base;
+ const struct dce110_clk_src_regs *regs;
+ const struct dce110_clk_src_mask *cs_mask;
+ const struct dce110_clk_src_shift *cs_shift;
+ struct dc_bios *bios;
+
+ struct spread_spectrum_data *dp_ss_params;
+ uint32_t dp_ss_params_cnt;
+ struct spread_spectrum_data *hdmi_ss_params;
+ uint32_t hdmi_ss_params_cnt;
+ struct spread_spectrum_data *dvi_ss_params;
+ uint32_t dvi_ss_params_cnt;
+
+ uint32_t ext_clk_khz;
+ uint32_t ref_freq_khz;
+
+ struct calc_pll_clock_source calc_pll;
+ struct calc_pll_clock_source calc_pll_hdmi;
+};
+
+bool dce110_clk_src_construct(
+ struct dce110_clk_src *clk_src,
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+	enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ const struct dce110_clk_src_shift *cs_shift,
+ const struct dce110_clk_src_mask *cs_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
new file mode 100644
index 000000000000..9031d22285ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -0,0 +1,827 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_clocks.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "bios_parser_interface.h"
+#include "dc.h"
+#include "dce_abm.h"
+#include "dmcu.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn_calcs.h"
+#endif
+#include "core_types.h"
+
+
+#define TO_DCE_CLOCKS(clocks)\
+ container_of(clocks, struct dce_disp_clk, base)
+
+#define REG(reg) \
+ (clk_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name
+
+#define CTX \
+ clk_dce->base.ctx
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - not supposed to be used per the HW design team*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - not supposed to be used per the HW design team*/
+{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+
+static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - not supposed to be used per the HW design team*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
+/* Starting point for each divider range.*/
+enum dce_divider_range_start {
+ DIVIDER_RANGE_01_START = 200, /* 2.00*/
+ DIVIDER_RANGE_02_START = 1600, /* 16.00*/
+ DIVIDER_RANGE_03_START = 3200, /* 32.00*/
+ DIVIDER_RANGE_SCALE_FACTOR = 100 /* Results are scaled up by 100.*/
+};
+
+/* Ranges for divider identifiers (Divider ID or DID),
+ * mmDENTIST_DISPCLK_CNTL.DENTIST_DISPCLK_WDIVIDER */
+enum dce_divider_id_register_setting {
+ DIVIDER_RANGE_01_BASE_DIVIDER_ID = 0X08,
+ DIVIDER_RANGE_02_BASE_DIVIDER_ID = 0X40,
+ DIVIDER_RANGE_03_BASE_DIVIDER_ID = 0X60,
+ DIVIDER_RANGE_MAX_DIVIDER_ID = 0X80
+};
+
+/* Step size between each divider within a range.  Incrementing
+ * DENTIST_DISPCLK_WDIVIDER by one increments the divider by this much. */
+enum dce_divider_range_step_size {
+ DIVIDER_RANGE_01_STEP_SIZE = 25, /* 0.25*/
+ DIVIDER_RANGE_02_STEP_SIZE = 50, /* 0.50*/
+ DIVIDER_RANGE_03_STEP_SIZE = 100 /* 1.00 */
+};
+
+static bool dce_divider_range_construct(
+ struct dce_divider_range *div_range,
+ int range_start,
+ int range_step,
+ int did_min,
+ int did_max)
+{
+ div_range->div_range_start = range_start;
+ div_range->div_range_step = range_step;
+ div_range->did_min = did_min;
+ div_range->did_max = did_max;
+
+ if (div_range->div_range_step == 0) {
+ div_range->div_range_step = 1;
+ /*div_range_step cannot be zero*/
+ BREAK_TO_DEBUGGER();
+ }
+	/* Calculate the end of the range from the other inputs.  See
+	 * DividerRange.h for the relationship between a divider ID (DID) and
+	 * a divider:
+	 *   number of divider IDs = maximum divider ID - minimum divider ID
+	 *   maximum divider in this range =
+	 *       (number of divider IDs) * step size + start of this range
+	 */
+ div_range->div_range_end = (did_max - did_min) * range_step
+ + range_start;
+ return true;
+}
+
+static int dce_divider_range_calc_divider(
+ struct dce_divider_range *div_range,
+ int did)
+{
+ /* Is this DID within our range?*/
+ if ((did < div_range->did_min) || (did >= div_range->did_max))
+ return INVALID_DIVIDER;
+
+ return ((did - div_range->did_min) * div_range->div_range_step)
+ + div_range->div_range_start;
+
+}
+
+static int dce_divider_range_get_divider(
+ struct dce_divider_range *div_range,
+ int ranges_num,
+ int did)
+{
+ int div = INVALID_DIVIDER;
+ int i;
+
+ for (i = 0; i < ranges_num; i++) {
+ /* Calculate divider with given divider ID*/
+ div = dce_divider_range_calc_divider(&div_range[i], did);
+ /* Found a valid return divider*/
+ if (div != INVALID_DIVIDER)
+ break;
+ }
+ return div;
+}
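
As a worked example of the DID-to-divider mapping above, using the enum values from this file: DID 0x0A lies in the first range, which starts at 2.00 with a 0.25 step from base DID 0x08, so

	int did = 0x0A;
	int div = (did - DIVIDER_RANGE_01_BASE_DIVIDER_ID)
			* DIVIDER_RANGE_01_STEP_SIZE
			+ DIVIDER_RANGE_01_START;
	/* (0x0A - 0x08) * 25 + 200 = 250, i.e. a divider of 2.50 once
	 * DIVIDER_RANGE_SCALE_FACTOR (100) is taken back out. */
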
+
+static int dce_clocks_get_dp_ref_freq(struct display_clock *clk)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int dprefclk_wdivider;
+ int dprefclk_src_sel;
+ int dp_ref_clk_khz = 600000;
+ int target_div = INVALID_DIVIDER;
+
+ /* ASSERT DP Reference Clock source is from DFS*/
+ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+ ASSERT(dprefclk_src_sel == 0);
+
+ /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+ * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+	/* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
+ target_div = dce_divider_range_get_divider(
+ clk_dce->divider_ranges,
+ DIVIDER_RANGE_MAX,
+ dprefclk_wdivider);
+
+ if (target_div != INVALID_DIVIDER) {
+ /* Calculate the current DFS clock, in kHz.*/
+ dp_ref_clk_khz = (DIVIDER_RANGE_SCALE_FACTOR
+ * clk_dce->dentist_vco_freq_khz) / target_div;
+ }
+
+	/* SW adjusts the DP REF clock average value for all purposes
+	 * (DP DTO, DP audio DTO and DP GTC) whenever the clock is spread,
+	 * i.e. in all of these cases:
+	 * - SS enabled on the DP ref clock and HW de-spreading enabled with
+	 *   SW calculations for DS_INCR/DS_MODULO (planned default case)
+	 * - SS enabled on the DP ref clock and HW de-spreading enabled with
+	 *   HW calculations (not planned to be used, but the average clock
+	 *   should still be valid)
+	 * - SS enabled on the DP ref clock and HW de-spreading disabled
+	 *   (should not be the case with CIK); SW then programs all generated
+	 *   rates according to the average value (as on previous ASICs)
+	 */
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
+ dal_fixed32_32_from_fraction(
+ clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed32_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
+ ss_percentage);
+ adj_dp_ref_clk_khz =
+ dal_fixed32_32_mul_int(
+ ss_percentage,
+ dp_ref_clk_khz);
+ dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
+ }
+
+ return dp_ref_clk_khz;
+}
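
A rough numeric walk-through of the function above, with illustrative values (3.6 GHz DENTIST VCO, a WDIVIDER that maps to a 6.00 divider, and a 0.5% downspread stored as 50 with divider 100):

	int dp_ref_khz = (DIVIDER_RANGE_SCALE_FACTOR * 3600000) / 600;	/* 600000 kHz */
	int ss_pct = 50, ss_div = 100;		/* 50/100 = 0.5% downspread */
	/* average clock = nominal * (1 - pct / (div * 200)); half the spread
	 * is taken off the nominal value */
	int avg_khz = dp_ref_khz - dp_ref_khz * ss_pct / (ss_div * 200);	/* 598500 kHz */
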
+
+/* TODO: This is the DCN DPREFCLK: it may be programmed either through
+ * DENTIST by VBIOS or through CLK0_CLK11 by SMU. For DCE120 it is always
+ * 600 MHz. Revisit the clock implementation.
+ */
+static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int dp_ref_clk_khz = 600000;
+
+ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
+ struct fixed32_32 ss_percentage = dal_fixed32_32_div_int(
+ dal_fixed32_32_from_fraction(
+ clk_dce->dprefclk_ss_percentage,
+ clk_dce->dprefclk_ss_divider), 200);
+ struct fixed32_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one,
+ ss_percentage);
+ adj_dp_ref_clk_khz =
+ dal_fixed32_32_mul_int(
+ ss_percentage,
+ dp_ref_clk_khz);
+ dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz);
+ }
+
+ return dp_ref_clk_khz;
+}
+
+static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct display_clock *clk,
+ struct state_dependent_clocks *req_clocks)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ int i;
+ enum dm_pp_clocks_state low_req_clk;
+
+ /* Iterate from highest supported to lowest valid state, and update
+ * lowest RequiredState with the lowest state that satisfies
+ * all required clocks
+ */
+ for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+ if (req_clocks->display_clk_khz >
+ clk_dce->max_clks_by_state[i].display_clk_khz
+ || req_clocks->pixel_clk_khz >
+ clk_dce->max_clks_by_state[i].pixel_clk_khz)
+ break;
+
+ low_req_clk = i + 1;
+ if (low_req_clk > clk->max_clks_state) {
+ dm_logger_write(clk->ctx->logger, LOG_WARNING,
+ "%s: clocks unsupported", __func__);
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ }
+
+ return low_req_clk;
+}
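
Using the dce110 table above as an illustration of the scan:

	/* A request of 500000 kHz display / 350000 kHz pixel clock fits the
	 * PERFORMANCE row but not the NOMINAL row (467000 kHz display), so
	 * the loop breaks at i = NOMINAL and the function returns
	 * low_req_clk = NOMINAL + 1 = PERFORMANCE. */
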
+
+static bool dce_clock_set_min_clocks_state(
+ struct display_clock *clk,
+ enum dm_pp_clocks_state clocks_state)
+{
+ struct dm_pp_power_level_change_request level_change_req = {
+ clocks_state };
+
+ if (clocks_state > clk->max_clks_state) {
+ /*Requested state exceeds max supported state.*/
+ dm_logger_write(clk->ctx->logger, LOG_WARNING,
+ "Requested state exceeds max supported state");
+ return false;
+ } else if (clocks_state == clk->cur_min_clks_state) {
+ /*if we're trying to set the same state, we can just return
+ * since nothing needs to be done*/
+ return true;
+ }
+
+ /* get max clock state from PPLIB */
+ if (dm_pp_apply_power_level_change_request(clk->ctx, &level_change_req))
+ clk->cur_min_clks_state = clocks_state;
+
+ return true;
+}
+
+static int dce_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+ struct dc_bios *bp = clk->ctx->dc_bios;
+ int actual_clock = requested_clk_khz;
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_dce->dentist_vco_freq_khz / 64);
+
+ /* Prepare to program display clock*/
+ pxl_clk_params.target_pixel_clock = requested_clk_khz;
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+ if (clk_dce->dfs_bypass_enabled) {
+
+ /* Cache the fixed display clock*/
+ clk_dce->dfs_bypass_disp_clk =
+ pxl_clk_params.dfs_bypass_display_clock;
+ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+ }
+
+	/* Coming out of power down (HW reset), mark the clock state as
+	 * ClocksStateNominal so that on resume we call the pplib voltage
+	 * regulator. */
+ if (requested_clk_khz == 0)
+ clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ return actual_clock;
+}
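
For example, assuming the common 3.6 GHz DENTIST VCO (an illustrative value), the floor applied above works out to:

	int dentist_vco_khz = 3600000;			/* illustrative */
	int floor_khz = dentist_vco_khz / 64;		/* 56250 kHz    */
	/* a request of 40000 kHz is therefore raised to 56250 kHz before the
	 * PLL is programmed; a request of 0 is left alone and only resets the
	 * minimum-clocks state. */
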
+
+static int dce_psr_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct dc_context *ctx = clk_dce->base.ctx;
+ struct dc *core_dc = ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clk_khz = requested_clk_khz;
+
+ actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
+
+ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
+ return actual_clk_khz;
+}
+
+static int dce112_set_clock(
+ struct display_clock *clk,
+ int requested_clk_khz)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(clk);
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk->ctx->dc_bios;
+ struct dc *core_dc = clk->ctx->dc;
+ struct abm *abm = core_dc->res_pool->abm;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clock = requested_clk_khz;
+ /* Prepare to program display clock*/
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_dce->dentist_vco_freq_khz / 62);
+
+ dce_clk_params.target_clock_frequency = requested_clk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+ actual_clock = dce_clk_params.target_clock_frequency;
+
+	/* Coming out of power down (HW reset), mark the clock state as
+	 * ClocksStateNominal so that on resume we call the pplib voltage
+	 * regulator. */
+ if (requested_clk_khz == 0)
+ clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ /*Program DP ref Clock*/
+ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+ if (abm->funcs->is_dmcu_initialized(abm) && clk_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ clk_dce->dfs_bypass_disp_clk = actual_clock;
+ return actual_clock;
+}
+
+static void dce_clock_read_integrated_info(struct dce_disp_clk *clk_dce)
+{
+ struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+ struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
+ struct integrated_info info = { { { 0 } } };
+ struct dc_firmware_info fw_info = { { 0 } };
+ int i;
+
+ if (bp->integrated_info)
+ info = *bp->integrated_info;
+
+ clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_dce->dentist_vco_freq_khz =
+ fw_info.smu_gpu_pll_output_freq;
+ if (clk_dce->dentist_vco_freq_khz == 0)
+ clk_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ /*update the maximum display clock for each power state*/
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ switch (i) {
+ case 0:
+ clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+ break;
+
+ case 1:
+ clk_state = DM_PP_CLOCKS_STATE_LOW;
+ break;
+
+ case 2:
+ clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ break;
+
+ case 3:
+ clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+ break;
+
+ default:
+ clk_state = DM_PP_CLOCKS_STATE_INVALID;
+ break;
+ }
+
+		/* Do not allow a bad VBIOS/SBIOS to override with invalid
+		 * values; require at least 100 MHz */
+ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+ clk_dce->max_clks_by_state[clk_state].display_clk_khz =
+ info.disp_clk_voltage[i].max_supported_clk;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_dce->dfs_bypass_enabled = true;
+
+ clk_dce->use_max_disp_clk = debug->max_disp_clk;
+}
+
+static void dce_clock_read_ss_info(struct dce_disp_clk *clk_dce)
+{
+ struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
+ int ss_info_num = bp->funcs->get_ss_entry_number(
+ bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+ if (ss_info_num) {
+ struct spread_spectrum_info info = { { 0 } };
+ enum bp_result result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+
+		/* VBIOS keeps an entry for GPU PLL SS even when SS is not
+		 * enabled; a non-zero SSInfo.spreadSpectrumPercentage is the
+		 * sign that SS actually is enabled.
+		 */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_dce->ss_on_dprefclk = true;
+ clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* TODO: Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+
+ return;
+ }
+
+ result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+
+		/* VBIOS keeps an entry for DPREFCLK SS even when SS is not
+		 * enabled; a non-zero SSInfo.spreadSpectrumPercentage is the
+		 * sign that SS actually is enabled.
+		 */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_dce->ss_on_dprefclk = true;
+ clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+ }
+ }
+}
+
+static bool dce_apply_clock_voltage_request(
+ struct display_clock *clk,
+ enum dm_pp_clock_type clocks_type,
+ int clocks_in_khz,
+ bool pre_mode_set,
+ bool update_dp_phyclk)
+{
+ bool send_request = false;
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ clock_voltage_req.clk_type = clocks_type;
+ clock_voltage_req.clocks_in_khz = clocks_in_khz;
+
+ /* to pplib */
+ if (pre_mode_set) {
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ if (clocks_in_khz > clk->cur_clocks_value.dispclk_in_khz) {
+ clk->cur_clocks_value.dispclk_notify_pplib_done = true;
+ send_request = true;
+ } else
+ clk->cur_clocks_value.dispclk_notify_pplib_done = false;
+			/* update the current clock value whether it increases or decreases */
+ clk->cur_clocks_value.dispclk_in_khz = clocks_in_khz;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ if (clocks_in_khz > clk->cur_clocks_value.max_pixelclk_in_khz) {
+ clk->cur_clocks_value.pixelclk_notify_pplib_done = true;
+ send_request = true;
+ } else
+ clk->cur_clocks_value.pixelclk_notify_pplib_done = false;
+			/* update the current clock value whether it increases or decreases */
+ clk->cur_clocks_value.max_pixelclk_in_khz = clocks_in_khz;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ if (clocks_in_khz > clk->cur_clocks_value.max_non_dp_phyclk_in_khz) {
+ clk->cur_clocks_value.phyclk_notigy_pplib_done = true;
+ send_request = true;
+ } else
+ clk->cur_clocks_value.phyclk_notigy_pplib_done = false;
+			/* update the current clock value whether it increases or decreases */
+ clk->cur_clocks_value.max_non_dp_phyclk_in_khz = clocks_in_khz;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ } else {
+ switch (clocks_type) {
+ case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
+ if (!clk->cur_clocks_value.dispclk_notify_pplib_done)
+ send_request = true;
+ break;
+ case DM_PP_CLOCK_TYPE_PIXELCLK:
+ if (!clk->cur_clocks_value.pixelclk_notify_pplib_done)
+ send_request = true;
+ break;
+ case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
+ if (!clk->cur_clocks_value.phyclk_notigy_pplib_done)
+ send_request = true;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+ if (send_request) {
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (clk->ctx->dce_version >= DCN_VERSION_1_0) {
+ struct dc *core_dc = clk->ctx->dc;
+ /*use dcfclk request voltage*/
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock_voltage_req.clocks_in_khz =
+ dcn_find_dcfclk_suits_all(core_dc, &clk->cur_clocks_value);
+ }
+#endif
+ dm_pp_apply_clock_for_voltage_request(
+ clk->ctx, &clock_voltage_req);
+ }
+ if (update_dp_phyclk && (clocks_in_khz >
+ clk->cur_clocks_value.max_dp_phyclk_in_khz))
+ clk->cur_clocks_value.max_dp_phyclk_in_khz = clocks_in_khz;
+
+ return true;
+}
+
+static const struct display_clock_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq_wrkaround,
+ .apply_clock_voltage_request = dce_apply_clock_voltage_request,
+ .set_clock = dce112_set_clock
+};
+
+static const struct display_clock_funcs dce112_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+ .get_required_clocks_state = dce_get_required_clocks_state,
+ .set_min_clocks_state = dce_clock_set_min_clocks_state,
+ .set_clock = dce112_set_clock
+};
+
+static const struct display_clock_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+ .get_required_clocks_state = dce_get_required_clocks_state,
+ .set_min_clocks_state = dce_clock_set_min_clocks_state,
+ .set_clock = dce_psr_set_clock
+};
+
+static const struct display_clock_funcs dce_funcs = {
+ .get_dp_ref_clk_frequency = dce_clocks_get_dp_ref_freq,
+ .get_required_clocks_state = dce_get_required_clocks_state,
+ .set_min_clocks_state = dce_clock_set_min_clocks_state,
+ .set_clock = dce_set_clock
+};
+
+static void dce_disp_clk_construct(
+ struct dce_disp_clk *clk_dce,
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct display_clock *base = &clk_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ clk_dce->regs = regs;
+ clk_dce->clk_shift = clk_shift;
+ clk_dce->clk_mask = clk_mask;
+
+ clk_dce->dfs_bypass_disp_clk = 0;
+
+ clk_dce->dprefclk_ss_percentage = 0;
+ clk_dce->dprefclk_ss_divider = 1000;
+ clk_dce->ss_on_dprefclk = false;
+
+ base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ dce_clock_read_integrated_info(clk_dce);
+ dce_clock_read_ss_info(clk_dce);
+
+ dce_divider_range_construct(
+ &clk_dce->divider_ranges[DIVIDER_RANGE_01],
+ DIVIDER_RANGE_01_START,
+ DIVIDER_RANGE_01_STEP_SIZE,
+ DIVIDER_RANGE_01_BASE_DIVIDER_ID,
+ DIVIDER_RANGE_02_BASE_DIVIDER_ID);
+ dce_divider_range_construct(
+ &clk_dce->divider_ranges[DIVIDER_RANGE_02],
+ DIVIDER_RANGE_02_START,
+ DIVIDER_RANGE_02_STEP_SIZE,
+ DIVIDER_RANGE_02_BASE_DIVIDER_ID,
+ DIVIDER_RANGE_03_BASE_DIVIDER_ID);
+ dce_divider_range_construct(
+ &clk_dce->divider_ranges[DIVIDER_RANGE_03],
+ DIVIDER_RANGE_03_START,
+ DIVIDER_RANGE_03_STEP_SIZE,
+ DIVIDER_RANGE_03_BASE_DIVIDER_ID,
+ DIVIDER_RANGE_MAX_DIVIDER_ID);
+}
+
+struct display_clock *dce_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce80_max_clks_by_state,
+ sizeof(dce80_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ return &clk_dce->base;
+}
+
+struct display_clock *dce110_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce110_max_clks_by_state,
+ sizeof(dce110_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_dce->base.funcs = &dce110_funcs;
+
+ return &clk_dce->base;
+}
+
+struct display_clock *dce112_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce112_max_clks_by_state,
+ sizeof(dce112_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_dce->base.funcs = &dce112_funcs;
+
+ return &clk_dce->base;
+}
+
+struct display_clock *dce120_disp_clk_create(struct dc_context *ctx)
+{
+ struct dce_disp_clk *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
+ struct dm_pp_clock_levels_with_voltage clk_level_info = {0};
+
+ if (clk_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_dce->max_clks_by_state,
+ dce120_max_clks_by_state,
+ sizeof(dce120_max_clks_by_state));
+
+ dce_disp_clk_construct(
+ clk_dce, ctx, NULL, NULL, NULL);
+
+ clk_dce->base.funcs = &dce120_funcs;
+
+ /* new in dce120 */
+ if (!ctx->dc->debug.disable_pplib_clock_request &&
+ dm_pp_get_clock_levels_by_type_with_voltage(
+ ctx, DM_PP_CLOCK_TYPE_DISPLAY_CLK, &clk_level_info)
+ && clk_level_info.num_levels)
+ clk_dce->max_displ_clk_in_khz =
+ clk_level_info.data[clk_level_info.num_levels - 1].clocks_in_khz;
+ else
+ clk_dce->max_displ_clk_in_khz = 1133000;
+
+ return &clk_dce->base;
+}
+
+void dce_disp_clk_destroy(struct display_clock **disp_clk)
+{
+ struct dce_disp_clk *clk_dce = TO_DCE_CLOCKS(*disp_clk);
+
+ kfree(clk_dce);
+ *disp_clk = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
new file mode 100644
index 000000000000..0e717e0dc8f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_CLOCKS_H_
+#define _DCE_CLOCKS_H_
+
+#include "display_clock.h"
+
+#define CLK_COMMON_REG_LIST_DCE_BASE() \
+ .DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
+ .DENTIST_DISPCLK_CNTL = mmDENTIST_DISPCLK_CNTL
+
+#define CLK_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ CLK_SF(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, mask_sh), \
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, mask_sh)
+
+#define CLK_REG_FIELD_LIST(type) \
+ type DPREFCLK_SRC_SEL; \
+ type DENTIST_DPREFCLK_WDIVIDER;
+
+struct dce_disp_clk_shift {
+ CLK_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_disp_clk_mask {
+ CLK_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_disp_clk_registers {
+ uint32_t DPREFCLK_CNTL;
+ uint32_t DENTIST_DISPCLK_CNTL;
+};
+
+/* Array identifiers and count for the divider ranges.*/
+enum dce_divider_range_count {
+ DIVIDER_RANGE_01 = 0,
+ DIVIDER_RANGE_02,
+ DIVIDER_RANGE_03,
+ DIVIDER_RANGE_MAX /* == 3*/
+};
+
+enum dce_divider_error_types {
+ INVALID_DID = 0,
+ INVALID_DIVIDER = 1
+};
+
+struct dce_divider_range {
+ int div_range_start;
+ /* The end of this range of dividers.*/
+ int div_range_end;
+ /* The distance between each divider in this range.*/
+ int div_range_step;
+ /* The divider id for the lowest divider.*/
+ int did_min;
+ /* The divider id for the highest divider.*/
+ int did_max;
+};
+
+struct dce_disp_clk {
+ struct display_clock base;
+ const struct dce_disp_clk_registers *regs;
+ const struct dce_disp_clk_shift *clk_shift;
+ const struct dce_disp_clk_mask *clk_mask;
+
+ struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
+ struct dce_divider_range divider_ranges[DIVIDER_RANGE_MAX];
+
+ bool use_max_disp_clk;
+ int dentist_vco_freq_khz;
+
+ /* Cache the status of DFS-bypass feature*/
+ bool dfs_bypass_enabled;
+ /* Cache the display clock returned by VBIOS if DFS-bypass is enabled.
+ * This is basically "Crystal Frequency In KHz" (XTALIN) frequency */
+ int dfs_bypass_disp_clk;
+
+ /* Flag for Enabled SS on DPREFCLK */
+ bool ss_on_dprefclk;
+ /* DPREFCLK SS percentage (if down-spread enabled) */
+ int dprefclk_ss_percentage;
+ /* DPREFCLK SS percentage Divider (100 or 1000) */
+ int dprefclk_ss_divider;
+
+ /* max disp_clk from PPLIB for max validation display clock*/
+ int max_displ_clk_in_khz;
+};
+
+
+struct display_clock *dce_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+struct display_clock *dce110_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+struct display_clock *dce112_disp_clk_create(
+ struct dc_context *ctx,
+ const struct dce_disp_clk_registers *regs,
+ const struct dce_disp_clk_shift *clk_shift,
+ const struct dce_disp_clk_mask *clk_mask);
+
+struct display_clock *dce120_disp_clk_create(struct dc_context *ctx);
+
+void dce_disp_clk_destroy(struct display_clock **disp_clk);
+
+#endif /* _DCE_CLOCKS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
new file mode 100644
index 000000000000..fd77df573b61
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dce_dmcu.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed32_32.h"
+#include "dc.h"
+
+#define TO_DCE_DMCU(dmcu)\
+ container_of(dmcu, struct dce_dmcu, base)
+
+#define REG(reg) \
+ (dmcu_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dmcu_dce->dmcu_shift->field_name, dmcu_dce->dmcu_mask->field_name
+
+#define CTX \
+ dmcu_dce->base.ctx
+
+/* PSR related commands */
+#define PSR_ENABLE 0x20
+#define PSR_EXIT 0x21
+#define PSR_SET 0x23
+#define PSR_SET_WAITLOOP 0x31
+#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
+static unsigned int cached_wait_loop_number;
+
+bool dce_dmcu_load_iram(struct dmcu *dmcu,
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int count = 0;
+
+ /* Enable write access to IRAM */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 1,
+ IRAM_WR_ADDR_AUTO_INC, 1);
+
+ REG_WAIT(DCI_MEM_PWR_STATUS, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ REG_WRITE(DMCU_IRAM_WR_CTRL, start_offset);
+
+ for (count = 0; count < bytes; count++)
+ REG_WRITE(DMCU_IRAM_WR_DATA, src[count]);
+
+ /* Disable write access to IRAM to allow dynamic sleep state */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 0,
+ IRAM_WR_ADDR_AUTO_INC, 0);
+
+ return true;
+}
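
A hedged usage sketch of the hook above; the microcode bytes below are placeholders for a real PSR/ABM firmware image, not something defined in this file:

	static const char iram_image[] = { 0x00 };	/* placeholder microcode */

	if (!dmcu->funcs->load_iram(dmcu, 0, iram_image, sizeof(iram_image)))
		DRM_ERROR("DMCU IRAM load failed\n");
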
+
+static void dce_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ uint32_t psrStateOffset = 0xf0;
+
+	/* Enable host access to DMCU IRAM for the read below */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
+
+ REG_WAIT(DCI_MEM_PWR_STATUS, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
+ REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
+
+ /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
+ *psr_state = REG_READ(DMCU_IRAM_RD_DATA);
+
+ /* Disable write access to IRAM after finished using IRAM
+ * in order to allow dynamic sleep state
+ */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0);
+}
+
+static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ unsigned int retryCount;
+ uint32_t psr_state = 0;
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_Cmd */
+ if (enable)
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_ENABLE);
+ else
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_EXIT);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ if (wait == true) {
+ for (retryCount = 0; retryCount <= 100; retryCount++) {
+ dce_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(10);
+ }
+ }
+}
+
+static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ union dce_dmcu_psr_config_data_reg1 masterCmdData1;
+ union dce_dmcu_psr_config_data_reg2 masterCmdData2;
+ union dce_dmcu_psr_config_data_reg3 masterCmdData3;
+
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+
+ /* Enable static screen interrupts for PSR supported display */
+ /* Disable the interrupt coming from other displays. */
+ REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 0,
+ STATIC_SCREEN2_INT_TO_UC_EN, 0,
+ STATIC_SCREEN3_INT_TO_UC_EN, 0,
+ STATIC_SCREEN4_INT_TO_UC_EN, 0);
+
+ switch (psr_context->controllerId) {
+ /* Driver uses case 1 for unconfigured */
+ case 1:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN2_INT_TO_UC_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN3_INT_TO_UC_EN, 1);
+ break;
+ case 4:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN4_INT_TO_UC_EN, 1);
+ break;
+ case 5:
+		/* CZ/NL only has 4 CRTC!!
+		 * This instance is defined in the HW regspec but is not
+		 * really valid; there is no interrupt enable mask for it.
+		 */
+ break;
+ case 6:
+ /* CZ/NL only has 4 CRTC!!
+ * These are here because they are defined in HW regspec,
+ * but not really valid. There is no interrupt enable mask
+ * for these instances.
+ */
+ break;
+ default:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ }
+
+ link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+ psr_context->sdpTransmitLineNumDeadline);
+
+ if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_PSRHostConfigData */
+ masterCmdData1.u32All = 0;
+ masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames;
+ masterCmdData1.bits.hyst_lines = psr_context->hyst_lines;
+ masterCmdData1.bits.rfb_update_auto_en =
+ psr_context->rfb_update_auto_en;
+ masterCmdData1.bits.dp_port_num = psr_context->transmitterId;
+ masterCmdData1.bits.dcp_sel = psr_context->controllerId;
+ masterCmdData1.bits.phy_type = psr_context->phyType;
+ masterCmdData1.bits.frame_cap_ind =
+ psr_context->psrFrameCaptureIndicationReq;
+ masterCmdData1.bits.aux_chan = psr_context->channel;
+ masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
+ masterCmdData1.u32All);
+
+ masterCmdData2.u32All = 0;
+ masterCmdData2.bits.dig_fe = psr_context->engineId;
+ masterCmdData2.bits.dig_be = psr_context->transmitterId;
+ masterCmdData2.bits.skip_wait_for_pll_lock =
+ psr_context->skipPsrWaitForPllLock;
+ masterCmdData2.bits.frame_delay = psr_context->frame_delay;
+ masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId;
+ masterCmdData2.bits.num_of_controllers =
+ psr_context->numberOfControllers;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2),
+ masterCmdData2.u32All);
+
+ masterCmdData3.u32All = 0;
+ masterCmdData3.bits.psr_level = psr_context->psr_level.u32all;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
+ masterCmdData3.u32All);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void dce_psr_wait_loop(
+ struct dmcu *dmcu,
+ unsigned int wait_loop_number)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+	union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
+
+	if (cached_wait_loop_number == wait_loop_number)
+ return;
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ masterCmdData1.u32 = 0;
+ masterCmdData1.bits.wait_loop = wait_loop_number;
+ cached_wait_loop_number = wait_loop_number;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET_WAITLOOP);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void dce_get_psr_wait_loop(unsigned int *psr_wait_loop_number)
+{
+ *psr_wait_loop_number = cached_wait_loop_number;
+}
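
All of the PSR commands above follow the same master-comm handshake. A condensed sketch, using the register macros from this file (dmcu, cmd and data1 are placeholders, so this is not a drop-in helper):

	/* 1. wait until the DMCU has consumed the previous command */
	REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 801);
	/* 2. stage any payload in MASTER_COMM_DATA_REG1..3 */
	dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), data1);
	/* 3. select the command byte (PSR_ENABLE, PSR_SET, ...) */
	REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, cmd);
	/* 4. raise the interrupt bit to notify the DMCU firmware */
	REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
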
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int count = 0;
+
+ REG_UPDATE(DMCU_CTRL, DMCU_ENABLE, 1);
+
+ /* Enable write access to IRAM */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 1,
+ IRAM_WR_ADDR_AUTO_INC, 1);
+
+ REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ REG_WRITE(DMCU_IRAM_WR_CTRL, start_offset);
+
+ for (count = 0; count < bytes; count++)
+ REG_WRITE(DMCU_IRAM_WR_DATA, src[count]);
+
+ /* Disable write access to IRAM to allow dynamic sleep state */
+ REG_UPDATE_2(DMCU_RAM_ACCESS_CTRL,
+ IRAM_HOST_ACCESS_EN, 0,
+ IRAM_WR_ADDR_AUTO_INC, 0);
+
+ return true;
+}
+
+static void dcn10_get_dmcu_psr_state(struct dmcu *dmcu, uint32_t *psr_state)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ uint32_t psrStateOffset = 0xf0;
+
+	/* Enable host access to DMCU IRAM for the read below */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 1);
+
+ REG_WAIT(DMU_MEM_PWR_CNTL, DMCU_IRAM_MEM_PWR_STATE, 0, 2, 10);
+
+ /* Write address to IRAM_RD_ADDR in DMCU_IRAM_RD_CTRL */
+ REG_WRITE(DMCU_IRAM_RD_CTRL, psrStateOffset);
+
+ /* Read data from IRAM_RD_DATA in DMCU_IRAM_RD_DATA*/
+ *psr_state = REG_READ(DMCU_IRAM_RD_DATA);
+
+ /* Disable write access to IRAM after finished using IRAM
+ * in order to allow dynamic sleep state
+ */
+ REG_UPDATE(DMCU_RAM_ACCESS_CTRL, IRAM_HOST_ACCESS_EN, 0);
+}
+
+static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ unsigned int retryCount;
+ uint32_t psr_state = 0;
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_Cmd */
+ if (enable)
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_ENABLE);
+ else
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ PSR_EXIT);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* Below loops 1000 x 500us = 500 ms.
+ * Exit PSR may need to wait 1-2 frames to power up. Timeout after at
+ * least a few frames. Should never hit the max retry assert below.
+ */
+ if (wait == true) {
+ for (retryCount = 0; retryCount <= 1000; retryCount++) {
+ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(500);
+ }
+
+ /* assert if max retry hit */
+ ASSERT(retryCount <= 1000);
+ }
+}
+
+static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ unsigned int dmcu_max_retry_on_wait_reg_ready = 801;
+ unsigned int dmcu_wait_reg_ready_interval = 100;
+
+ union dce_dmcu_psr_config_data_reg1 masterCmdData1;
+ union dce_dmcu_psr_config_data_reg2 masterCmdData2;
+ union dce_dmcu_psr_config_data_reg3 masterCmdData3;
+
+ link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
+ psr_context->psrExitLinkTrainingRequired);
+
+ /* Enable static screen interrupts for PSR supported display */
+ /* Disable the interrupt coming from other displays. */
+ REG_UPDATE_4(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 0,
+ STATIC_SCREEN2_INT_TO_UC_EN, 0,
+ STATIC_SCREEN3_INT_TO_UC_EN, 0,
+ STATIC_SCREEN4_INT_TO_UC_EN, 0);
+
+ switch (psr_context->controllerId) {
+ /* Driver uses case 1 for unconfigured */
+ case 1:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN2_INT_TO_UC_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN3_INT_TO_UC_EN, 1);
+ break;
+ case 4:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN4_INT_TO_UC_EN, 1);
+ break;
+ case 5:
+		/* CZ/NL only has 4 CRTC!!
+		 * This instance is defined in the HW regspec but is not
+		 * really valid; there is no interrupt enable mask for it.
+		 */
+ break;
+ case 6:
+ /* CZ/NL only has 4 CRTC!!
+ * These are here because they are defined in HW regspec,
+ * but not really valid. There is no interrupt enable mask
+ * for these instances.
+ */
+ break;
+ default:
+ REG_UPDATE(DMCU_INTERRUPT_TO_UC_EN_MASK,
+ STATIC_SCREEN1_INT_TO_UC_EN, 1);
+ break;
+ }
+
+ link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
+ psr_context->sdpTransmitLineNumDeadline);
+
+ if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
+ dmcu_wait_reg_ready_interval,
+ dmcu_max_retry_on_wait_reg_ready);
+
+ /* setDMCUParam_PSRHostConfigData */
+ masterCmdData1.u32All = 0;
+ masterCmdData1.bits.timehyst_frames = psr_context->timehyst_frames;
+ masterCmdData1.bits.hyst_lines = psr_context->hyst_lines;
+ masterCmdData1.bits.rfb_update_auto_en =
+ psr_context->rfb_update_auto_en;
+ masterCmdData1.bits.dp_port_num = psr_context->transmitterId;
+ masterCmdData1.bits.dcp_sel = psr_context->controllerId;
+ masterCmdData1.bits.phy_type = psr_context->phyType;
+ masterCmdData1.bits.frame_cap_ind =
+ psr_context->psrFrameCaptureIndicationReq;
+ masterCmdData1.bits.aux_chan = psr_context->channel;
+ masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
+ masterCmdData1.u32All);
+
+ masterCmdData2.u32All = 0;
+ masterCmdData2.bits.dig_fe = psr_context->engineId;
+ masterCmdData2.bits.dig_be = psr_context->transmitterId;
+ masterCmdData2.bits.skip_wait_for_pll_lock =
+ psr_context->skipPsrWaitForPllLock;
+ masterCmdData2.bits.frame_delay = psr_context->frame_delay;
+ masterCmdData2.bits.smu_phy_id = psr_context->smuPhyId;
+ masterCmdData2.bits.num_of_controllers =
+ psr_context->numberOfControllers;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG2),
+ masterCmdData2.u32All);
+
+ masterCmdData3.u32All = 0;
+ masterCmdData3.bits.psr_level = psr_context->psr_level.u32all;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
+ masterCmdData3.u32All);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG,
+ MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+}
+
+static void dcn10_psr_wait_loop(
+ struct dmcu *dmcu,
+ unsigned int wait_loop_number)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+	union dce_dmcu_psr_config_data_wait_loop_reg1 masterCmdData1;
+
+	if (wait_loop_number != 0) {
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ masterCmdData1.u32 = 0;
+ masterCmdData1.bits.wait_loop = wait_loop_number;
+ cached_wait_loop_number = wait_loop_number;
+ dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1), masterCmdData1.u32);
+
+ /* setDMCUParam_Cmd */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, PSR_SET_WAITLOOP);
+
+ /* notifyDMCUMsg */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+ }
+}
+
+static void dcn10_get_psr_wait_loop(unsigned int *psr_wait_loop_number)
+{
+ *psr_wait_loop_number = cached_wait_loop_number;
+}
+
+#endif
+
+static const struct dmcu_funcs dce_funcs = {
+ .load_iram = dce_dmcu_load_iram,
+ .set_psr_enable = dce_dmcu_set_psr_enable,
+ .setup_psr = dce_dmcu_setup_psr,
+ .get_psr_state = dce_get_dmcu_psr_state,
+ .set_psr_wait_loop = dce_psr_wait_loop,
+ .get_psr_wait_loop = dce_get_psr_wait_loop
+};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+static const struct dmcu_funcs dcn10_funcs = {
+ .load_iram = dcn10_dmcu_load_iram,
+ .set_psr_enable = dcn10_dmcu_set_psr_enable,
+ .setup_psr = dcn10_dmcu_setup_psr,
+ .get_psr_state = dcn10_get_dmcu_psr_state,
+ .set_psr_wait_loop = dcn10_psr_wait_loop,
+ .get_psr_wait_loop = dcn10_get_psr_wait_loop
+};
+#endif
+
+static void dce_dmcu_construct(
+ struct dce_dmcu *dmcu_dce,
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dmcu *base = &dmcu_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ dmcu_dce->regs = regs;
+ dmcu_dce->dmcu_shift = dmcu_shift;
+ dmcu_dce->dmcu_mask = dmcu_mask;
+}
+
+struct dmcu *dce_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+
+ if (dmcu_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_dmcu_construct(
+ dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ dmcu_dce->base.funcs = &dce_funcs;
+
+ return &dmcu_dce->base;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+struct dmcu *dcn10_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask)
+{
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
+
+ if (dmcu_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_dmcu_construct(
+ dmcu_dce, ctx, regs, dmcu_shift, dmcu_mask);
+
+ dmcu_dce->base.funcs = &dcn10_funcs;
+
+ return &dmcu_dce->base;
+}
+#endif
+
+void dce_dmcu_destroy(struct dmcu **dmcu)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu);
+
+ kfree(dmcu_dce);
+ *dmcu = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
new file mode 100644
index 000000000000..b85f53c2f6f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef _DCE_DMCU_H_
+#define _DCE_DMCU_H_
+
+#include "dmcu.h"
+
+#define DMCU_COMMON_REG_LIST_DCE_BASE() \
+ SR(DMCU_CTRL), \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_WR_CTRL), \
+ SR(DMCU_IRAM_WR_DATA), \
+ SR(MASTER_COMM_DATA_REG1), \
+ SR(MASTER_COMM_DATA_REG2), \
+ SR(MASTER_COMM_DATA_REG3), \
+ SR(MASTER_COMM_CMD_REG), \
+ SR(MASTER_COMM_CNTL_REG), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SR(SMU_INTERRUPT_CONTROL)
+
+#define DMCU_DCE110_COMMON_REG_LIST() \
+ DMCU_COMMON_REG_LIST_DCE_BASE(), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define DMCU_DCN10_REG_LIST()\
+ DMCU_COMMON_REG_LIST_DCE_BASE(), \
+ SR(DMU_MEM_PWR_CNTL)
+
+#define DMCU_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ DMCU_SF(DMCU_CTRL, \
+ DMCU_ENABLE, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_HOST_ACCESS_EN, mask_sh), \
+ DMCU_SF(DMCU_RAM_ACCESS_CTRL, \
+ IRAM_WR_ADDR_AUTO_INC, mask_sh), \
+ DMCU_SF(MASTER_COMM_CMD_REG, \
+ MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
+ DMCU_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN1_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN2_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN3_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(DMCU_INTERRUPT_TO_UC_EN_MASK, \
+ STATIC_SCREEN4_INT_TO_UC_EN, mask_sh), \
+ DMCU_SF(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, mask_sh)
+
+#define DMCU_MASK_SH_LIST_DCE110(mask_sh) \
+ DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ DMCU_SF(DCI_MEM_PWR_STATUS, \
+ DMCU_IRAM_MEM_PWR_STATE, mask_sh)
+
+#define DMCU_MASK_SH_LIST_DCN10(mask_sh) \
+ DMCU_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ DMCU_SF(DMU_MEM_PWR_CNTL, \
+ DMCU_IRAM_MEM_PWR_STATE, mask_sh)
+
+#define DMCU_REG_FIELD_LIST(type) \
+ type DMCU_IRAM_MEM_PWR_STATE; \
+ type IRAM_HOST_ACCESS_EN; \
+ type IRAM_WR_ADDR_AUTO_INC; \
+ type DMCU_ENABLE; \
+ type MASTER_COMM_CMD_REG_BYTE0; \
+ type MASTER_COMM_INTERRUPT; \
+ type DPHY_RX_FAST_TRAINING_CAPABLE; \
+ type DPHY_LOAD_BS_COUNT; \
+ type STATIC_SCREEN1_INT_TO_UC_EN; \
+ type STATIC_SCREEN2_INT_TO_UC_EN; \
+ type STATIC_SCREEN3_INT_TO_UC_EN; \
+ type STATIC_SCREEN4_INT_TO_UC_EN; \
+ type DP_SEC_GSP0_LINE_NUM; \
+ type DP_SEC_GSP0_PRIORITY; \
+ type DC_SMU_INT_ENABLE
+
+struct dce_dmcu_shift {
+ DMCU_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_dmcu_mask {
+ DMCU_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_dmcu_registers {
+ uint32_t DMCU_CTRL;
+ uint32_t DMCU_RAM_ACCESS_CTRL;
+ uint32_t DCI_MEM_PWR_STATUS;
+ uint32_t DMU_MEM_PWR_CNTL;
+ uint32_t DMCU_IRAM_WR_CTRL;
+ uint32_t DMCU_IRAM_WR_DATA;
+
+ uint32_t MASTER_COMM_DATA_REG1;
+ uint32_t MASTER_COMM_DATA_REG2;
+ uint32_t MASTER_COMM_DATA_REG3;
+ uint32_t MASTER_COMM_CMD_REG;
+ uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t DMCU_IRAM_RD_CTRL;
+ uint32_t DMCU_IRAM_RD_DATA;
+ uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
+ uint32_t SMU_INTERRUPT_CONTROL;
+};
+
+struct dce_dmcu {
+ struct dmcu base;
+ const struct dce_dmcu_registers *regs;
+ const struct dce_dmcu_shift *dmcu_shift;
+ const struct dce_dmcu_mask *dmcu_mask;
+};
+
+/*******************************************************************
+ * MASTER_COMM_DATA_REG1 Bit position Data
+ * 7:0 hyst_frames[7:0]
+ * 14:8 hyst_lines[6:0]
+ * 15 RFB_UPDATE_AUTO_EN
+ * 18:16 phy_num[2:0]
+ * 21:19 dcp_sel[2:0]
+ * 22 phy_type
+ * 23 frame_cap_ind
+ * 26:24 aux_chan[2:0]
+ * 30:27 aux_repeat[3:0]
+ * 31:31 reserved[31:31]
+ ******************************************************************/
+union dce_dmcu_psr_config_data_reg1 {
+ struct {
+ unsigned int timehyst_frames:8; /*[7:0]*/
+ unsigned int hyst_lines:7; /*[14:8]*/
+ unsigned int rfb_update_auto_en:1; /*[15:15]*/
+ unsigned int dp_port_num:3; /*[18:16]*/
+ unsigned int dcp_sel:3; /*[21:19]*/
+ unsigned int phy_type:1; /*[22:22]*/
+ unsigned int frame_cap_ind:1; /*[23:23]*/
+ unsigned int aux_chan:3; /*[26:24]*/
+ unsigned int aux_repeat:4; /*[30:27]*/
+ unsigned int reserved:1; /*[31:31]*/
+ } bits;
+ unsigned int u32All;
+};
+
+/*******************************************************************
+ * MASTER_COMM_DATA_REG2
+ *******************************************************************/
+union dce_dmcu_psr_config_data_reg2 {
+ struct {
+ unsigned int dig_fe:3; /*[2:0]*/
+ unsigned int dig_be:3; /*[5:3]*/
+ unsigned int skip_wait_for_pll_lock:1; /*[6:6]*/
+ unsigned int reserved:9; /*[15:7]*/
+ unsigned int frame_delay:8; /*[23:16]*/
+ unsigned int smu_phy_id:4; /*[27:24]*/
+ unsigned int num_of_controllers:4; /*[31:28]*/
+ } bits;
+ unsigned int u32All;
+};
+
+/*******************************************************************
+ * MASTER_COMM_DATA_REG3
+ *******************************************************************/
+union dce_dmcu_psr_config_data_reg3 {
+ struct {
+ unsigned int psr_level:16; /*[15:0]*/
+ unsigned int link_rate:4; /*[19:16]*/
+ unsigned int reserved:12; /*[31:20]*/
+ } bits;
+ unsigned int u32All;
+};
+
+union dce_dmcu_psr_config_data_wait_loop_reg1 {
+ struct {
+ unsigned int wait_loop:16; /* [15:0] */
+ unsigned int reserved:16; /* [31:16] */
+ } bits;
+ unsigned int u32;
+};
+
+struct dmcu *dce_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask);
+
+struct dmcu *dcn10_dmcu_create(
+ struct dc_context *ctx,
+ const struct dce_dmcu_registers *regs,
+ const struct dce_dmcu_shift *dmcu_shift,
+ const struct dce_dmcu_mask *dmcu_mask);
+
+void dce_dmcu_destroy(struct dmcu **dmcu);
+
+#endif /* _DCE_DMCU_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
new file mode 100644
index 000000000000..d2e66b1bc0ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_hwseq.h"
+#include "reg_helper.h"
+#include "hw_sequencer.h"
+#include "core_types.h"
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+void dce_enable_fe_clock(struct dce_hwseq *hws,
+ unsigned int fe_inst, bool enable)
+{
+ REG_UPDATE(DCFE_CLOCK_CONTROL[fe_inst],
+ DCFE_CLOCK_ENABLE, enable);
+}
+
+void dce_pipe_control_lock(struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ uint32_t lock_val = lock ? 1 : 0;
+ uint32_t dcp_grph, scl, blnd, update_lock_mode, val;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Don't lock the pipe when it is blanked */
+ if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg))
+ return;
+
+ val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->pipe_idx],
+ BLND_DCP_GRPH_V_UPDATE_LOCK, &dcp_grph,
+ BLND_SCL_V_UPDATE_LOCK, &scl,
+ BLND_BLND_V_UPDATE_LOCK, &blnd,
+ BLND_V_UPDATE_LOCK_MODE, &update_lock_mode);
+
+ dcp_grph = lock_val;
+ scl = lock_val;
+ blnd = lock_val;
+ update_lock_mode = lock_val;
+
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ BLND_DCP_GRPH_V_UPDATE_LOCK, dcp_grph,
+ BLND_SCL_V_UPDATE_LOCK, scl);
+
+ if (hws->masks->BLND_BLND_V_UPDATE_LOCK != 0)
+ REG_SET_2(BLND_V_UPDATE_LOCK[pipe->pipe_idx], val,
+ BLND_BLND_V_UPDATE_LOCK, blnd,
+ BLND_V_UPDATE_LOCK_MODE, update_lock_mode);
+
+ if (hws->wa.blnd_crtc_trigger) {
+ if (!lock) {
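+ /* Writing the register back with its current value acts as a
+ * trigger for the pending CRTC update (blnd_crtc_trigger workaround).
+ */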
+ uint32_t value = REG_READ(CRTC_H_BLANK_START_END[pipe->pipe_idx]);
+ REG_WRITE(CRTC_H_BLANK_START_END[pipe->pipe_idx], value);
+ }
+ }
+}
+
+void dce_set_blender_mode(struct dce_hwseq *hws,
+ unsigned int blnd_inst,
+ enum blnd_mode mode)
+{
+ uint32_t feedthrough = 1;
+ uint32_t blnd_mode = 0;
+ uint32_t multiplied_mode = 0;
+ uint32_t alpha_mode = 2;
+
+ switch (mode) {
+ case BLND_MODE_OTHER_PIPE:
+ feedthrough = 0;
+ blnd_mode = 1;
+ alpha_mode = 0;
+ break;
+ case BLND_MODE_BLENDING:
+ feedthrough = 0;
+ blnd_mode = 2;
+ alpha_mode = 0;
+ multiplied_mode = 1;
+ break;
+ case BLND_MODE_CURRENT_PIPE:
+ default:
+ if (REG(BLND_CONTROL[blnd_inst]) == REG(BLNDV_CONTROL) ||
+ blnd_inst == 0)
+ feedthrough = 0;
+ break;
+ }
+
+ REG_UPDATE(BLND_CONTROL[blnd_inst],
+ BLND_MODE, blnd_mode);
+
+ if (hws->masks->BLND_ALPHA_MODE != 0) {
+ REG_UPDATE_3(BLND_CONTROL[blnd_inst],
+ BLND_FEEDTHROUGH_EN, feedthrough,
+ BLND_ALPHA_MODE, alpha_mode,
+ BLND_MULTIPLIED_MODE, multiplied_mode);
+ }
+}
+
+
+static void dce_disable_sram_shut_down(struct dce_hwseq *hws)
+{
+ if (REG(DC_MEM_GLOBAL_PWR_REQ_CNTL))
+ REG_UPDATE(DC_MEM_GLOBAL_PWR_REQ_CNTL,
+ DC_MEM_GLOBAL_PWR_REQ_DIS, 1);
+}
+
+static void dce_underlay_clock_enable(struct dce_hwseq *hws)
+{
+ /* todo: why do we need this at boot? is dce_enable_fe_clock enough? */
+ if (REG(DCFEV_CLOCK_CONTROL))
+ REG_UPDATE(DCFEV_CLOCK_CONTROL,
+ DCFEV_CLOCK_ENABLE, 1);
+}
+
+static void enable_hw_base_light_sleep(void)
+{
+ /* TODO: implement */
+}
+
+static void disable_sw_manual_control_light_sleep(void)
+{
+ /* TODO: implement */
+}
+
+void dce_clock_gating_power_up(struct dce_hwseq *hws,
+ bool enable)
+{
+ if (enable) {
+ enable_hw_base_light_sleep();
+ disable_sw_manual_control_light_sleep();
+ } else {
+ dce_disable_sram_shut_down(hws);
+ dce_underlay_clock_enable(hws);
+ }
+}
+
+void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
+ struct clock_source *clk_src,
+ unsigned int tg_inst)
+{
+ if (clk_src->id == CLOCK_SOURCE_ID_DP_DTO || clk_src->dp_clk_src) {
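+ /* DP streams source the pixel rate from the DP DTO */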
+ REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
+ DP_DTO0_ENABLE, 1);
+
+ } else if (clk_src->id >= CLOCK_SOURCE_COMBO_PHY_PLL0) {
+ uint32_t rate_source = clk_src->id - CLOCK_SOURCE_COMBO_PHY_PLL0;
+
+ REG_UPDATE_2(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
+ PHYPLL_PIXEL_RATE_SOURCE, rate_source,
+ PIXEL_RATE_PLL_SOURCE, 0);
+
+ REG_UPDATE(PIXEL_RATE_CNTL[tg_inst],
+ DP_DTO0_ENABLE, 0);
+
+ } else if (clk_src->id <= CLOCK_SOURCE_ID_PLL2) {
+ uint32_t rate_source = clk_src->id - CLOCK_SOURCE_ID_PLL0;
+
+ REG_UPDATE_2(PIXEL_RATE_CNTL[tg_inst],
+ PIXEL_RATE_SOURCE, rate_source,
+ DP_DTO0_ENABLE, 0);
+
+ if (REG(PHYPLL_PIXEL_RATE_CNTL[tg_inst]))
+ REG_UPDATE(PHYPLL_PIXEL_RATE_CNTL[tg_inst],
+ PIXEL_RATE_PLL_SOURCE, 1);
+ } else {
+ DC_ERR("Unknown clock source. clk_src id: %d, TG_inst: %d",
+ clk_src->id, tg_inst);
+ }
+}
+
+/* Only use LUT for 8 bit formats */
+bool dce_use_lut(const struct dc_plane_state *plane_state)
+{
+ switch (plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
new file mode 100644
index 000000000000..52506155e361
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -0,0 +1,631 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DCE_HWSEQ_H__
+#define __DCE_HWSEQ_H__
+
+#include "hw_sequencer.h"
+
+#define BL_REG_LIST()\
+ SR(LVTMA_PWRSEQ_CNTL), \
+ SR(LVTMA_PWRSEQ_STATE)
+
+#define HWSEQ_DCEF_REG_LIST_DCE8() \
+ .DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[2] = mmCRTC2_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[3] = mmCRTC3_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[4] = mmCRTC4_CRTC_DCFE_CLOCK_CONTROL, \
+ .DCFE_CLOCK_CONTROL[5] = mmCRTC5_CRTC_DCFE_CLOCK_CONTROL
+
+#define HWSEQ_DCEF_REG_LIST() \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 3), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 4), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 5), \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)
+
+#define HWSEQ_BLND_REG_LIST() \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 0), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 1), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 3), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 4), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 5), \
+ SRII(BLND_CONTROL, BLND, 0), \
+ SRII(BLND_CONTROL, BLND, 1), \
+ SRII(BLND_CONTROL, BLND, 2), \
+ SRII(BLND_CONTROL, BLND, 3), \
+ SRII(BLND_CONTROL, BLND, 4), \
+ SRII(BLND_CONTROL, BLND, 5)
+
+#define HWSEQ_PIXEL_RATE_REG_LIST(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1), \
+ SRII(PIXEL_RATE_CNTL, blk, 2), \
+ SRII(PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_PHYPLL_REG_LIST(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 2), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 3), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+
+#define HWSEQ_DCE11_REG_LIST_BASE() \
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
+ SR(DCFEV_CLOCK_CONTROL), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 0), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 1), \
+ SRII(CRTC_H_BLANK_START_END, CRTC, 0),\
+ SRII(CRTC_H_BLANK_START_END, CRTC, 1),\
+ SRII(BLND_V_UPDATE_LOCK, BLND, 0),\
+ SRII(BLND_V_UPDATE_LOCK, BLND, 1),\
+ SRII(BLND_CONTROL, BLND, 0),\
+ SRII(BLND_CONTROL, BLND, 1),\
+ SR(BLNDV_CONTROL),\
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
+ BL_REG_LIST()
+
+#define HWSEQ_DCE8_REG_LIST() \
+ HWSEQ_DCEF_REG_LIST_DCE8(), \
+ HWSEQ_BLND_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
+ BL_REG_LIST()
+
+#define HWSEQ_DCE10_REG_LIST() \
+ HWSEQ_DCEF_REG_LIST(), \
+ HWSEQ_BLND_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+ BL_REG_LIST()
+
+#define HWSEQ_ST_REG_LIST() \
+ HWSEQ_DCE11_REG_LIST_BASE(), \
+ .DCFE_CLOCK_CONTROL[2] = mmDCFEV_CLOCK_CONTROL, \
+ .CRTC_H_BLANK_START_END[2] = mmCRTCV_H_BLANK_START_END, \
+ .BLND_V_UPDATE_LOCK[2] = mmBLNDV_V_UPDATE_LOCK, \
+ .BLND_CONTROL[2] = mmBLNDV_CONTROL
+
+#define HWSEQ_CZ_REG_LIST() \
+ HWSEQ_DCE11_REG_LIST_BASE(), \
+ SRII(DCFE_CLOCK_CONTROL, DCFE, 2), \
+ SRII(CRTC_H_BLANK_START_END, CRTC, 2), \
+ SRII(BLND_V_UPDATE_LOCK, BLND, 2), \
+ SRII(BLND_CONTROL, BLND, 2), \
+ .DCFE_CLOCK_CONTROL[3] = mmDCFEV_CLOCK_CONTROL, \
+ .CRTC_H_BLANK_START_END[3] = mmCRTCV_H_BLANK_START_END, \
+ .BLND_V_UPDATE_LOCK[3] = mmBLNDV_V_UPDATE_LOCK, \
+ .BLND_CONTROL[3] = mmBLNDV_CONTROL
+
+#define HWSEQ_DCE120_REG_LIST() \
+ HWSEQ_DCE10_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+ HWSEQ_PHYPLL_REG_LIST(CRTC), \
+ SR(DCHUB_FB_LOCATION),\
+ SR(DCHUB_AGP_BASE),\
+ SR(DCHUB_AGP_BOT),\
+ SR(DCHUB_AGP_TOP), \
+ BL_REG_LIST()
+
+#define HWSEQ_DCE112_REG_LIST() \
+ HWSEQ_DCE10_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
+ HWSEQ_PHYPLL_REG_LIST(CRTC), \
+ BL_REG_LIST()
+
+#define HWSEQ_DCN_REG_LIST()\
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 0), \
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 1), \
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 2), \
+ SRII(OTG_GLOBAL_SYNC_STATUS, OTG, 3), \
+ SRII(DCHUBP_CNTL, HUBP, 0), \
+ SRII(DCHUBP_CNTL, HUBP, 1), \
+ SRII(DCHUBP_CNTL, HUBP, 2), \
+ SRII(DCHUBP_CNTL, HUBP, 3), \
+ SRII(HUBP_CLK_CNTL, HUBP, 0), \
+ SRII(HUBP_CLK_CNTL, HUBP, 1), \
+ SRII(HUBP_CLK_CNTL, HUBP, 2), \
+ SRII(HUBP_CLK_CNTL, HUBP, 3), \
+ SRII(DPP_CONTROL, DPP_TOP, 0), \
+ SRII(DPP_CONTROL, DPP_TOP, 1), \
+ SRII(DPP_CONTROL, DPP_TOP, 2), \
+ SRII(DPP_CONTROL, DPP_TOP, 3), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 0), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 1), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 2), \
+ SRII(OPP_PIPE_CONTROL, OPP_PIPE, 3), \
+ SR(REFCLK_CNTL), \
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
+ SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
+ SR(DCHUBBUB_ARB_SAT_LEVEL),\
+ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\
+ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \
+ SR(DCHUBBUB_TEST_DEBUG_INDEX), \
+ SR(DCHUBBUB_TEST_DEBUG_DATA), \
+ SR(DIO_MEM_PWR_CTRL), \
+ SR(DCCG_GATE_DISABLE_CNTL), \
+ SR(DCCG_GATE_DISABLE_CNTL2), \
+ SR(DCFCLK_CNTL), \
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32),\
+ MMHUB_SR(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32),\
+ MMHUB_SR(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32),\
+ MMHUB_SR(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_LOW_ADDR),\
+ MMHUB_SR(MC_VM_SYSTEM_APERTURE_HIGH_ADDR)
+
+#define HWSEQ_SR_WATERMARK_REG_LIST()\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D),\
+ SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
+
+#define HWSEQ_DCN1_REG_LIST()\
+ HWSEQ_DCN_REG_LIST(), \
+ HWSEQ_SR_WATERMARK_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST(OTG), \
+ HWSEQ_PHYPLL_REG_LIST(OTG), \
+ SR(DCHUBBUB_SDPIF_FB_TOP),\
+ SR(DCHUBBUB_SDPIF_FB_BASE),\
+ SR(DCHUBBUB_SDPIF_FB_OFFSET),\
+ SR(DCHUBBUB_SDPIF_AGP_BASE),\
+ SR(DCHUBBUB_SDPIF_AGP_BOT),\
+ SR(DCHUBBUB_SDPIF_AGP_TOP),\
+ SR(DOMAIN0_PG_CONFIG), \
+ SR(DOMAIN1_PG_CONFIG), \
+ SR(DOMAIN2_PG_CONFIG), \
+ SR(DOMAIN3_PG_CONFIG), \
+ SR(DOMAIN4_PG_CONFIG), \
+ SR(DOMAIN5_PG_CONFIG), \
+ SR(DOMAIN6_PG_CONFIG), \
+ SR(DOMAIN7_PG_CONFIG), \
+ SR(DOMAIN0_PG_STATUS), \
+ SR(DOMAIN1_PG_STATUS), \
+ SR(DOMAIN2_PG_STATUS), \
+ SR(DOMAIN3_PG_STATUS), \
+ SR(DOMAIN4_PG_STATUS), \
+ SR(DOMAIN5_PG_STATUS), \
+ SR(DOMAIN6_PG_STATUS), \
+ SR(DOMAIN7_PG_STATUS), \
+ SR(D1VGA_CONTROL), \
+ SR(D2VGA_CONTROL), \
+ SR(D3VGA_CONTROL), \
+ SR(D4VGA_CONTROL), \
+ SR(DC_IP_REQUEST_CNTL), \
+ BL_REG_LIST()
+
+struct dce_hwseq_registers {
+
+ /* Backlight registers */
+ uint32_t LVTMA_PWRSEQ_CNTL;
+ uint32_t LVTMA_PWRSEQ_STATE;
+
+ uint32_t DCFE_CLOCK_CONTROL[6];
+ uint32_t DCFEV_CLOCK_CONTROL;
+ uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
+ uint32_t BLND_V_UPDATE_LOCK[6];
+ uint32_t BLND_CONTROL[6];
+ uint32_t BLNDV_CONTROL;
+ uint32_t CRTC_H_BLANK_START_END[6];
+ uint32_t PIXEL_RATE_CNTL[6];
+ uint32_t PHYPLL_PIXEL_RATE_CNTL[6];
+ /*DCHUB*/
+ uint32_t DCHUB_FB_LOCATION;
+ uint32_t DCHUB_AGP_BASE;
+ uint32_t DCHUB_AGP_BOT;
+ uint32_t DCHUB_AGP_TOP;
+
+ uint32_t OTG_GLOBAL_SYNC_STATUS[4];
+ uint32_t DCHUBP_CNTL[4];
+ uint32_t HUBP_CLK_CNTL[4];
+ uint32_t DPP_CONTROL[4];
+ uint32_t OPP_PIPE_CONTROL[4];
+ uint32_t REFCLK_CNTL;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;
+ uint32_t DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D;
+ uint32_t DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL;
+ uint32_t DCHUBBUB_ARB_SAT_LEVEL;
+ uint32_t DCHUBBUB_ARB_DF_REQ_OUTSTAND;
+ uint32_t DCHUBBUB_GLOBAL_TIMER_CNTL;
+ uint32_t DCHUBBUB_ARB_DRAM_STATE_CNTL;
+ uint32_t DCHUBBUB_TEST_DEBUG_INDEX;
+ uint32_t DCHUBBUB_TEST_DEBUG_DATA;
+ uint32_t DCHUBBUB_SDPIF_FB_TOP;
+ uint32_t DCHUBBUB_SDPIF_FB_BASE;
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
+ uint32_t DCHUBBUB_SDPIF_AGP_BASE;
+ uint32_t DCHUBBUB_SDPIF_AGP_BOT;
+ uint32_t DCHUBBUB_SDPIF_AGP_TOP;
+ uint32_t DC_IP_REQUEST_CNTL;
+ uint32_t DOMAIN0_PG_CONFIG;
+ uint32_t DOMAIN1_PG_CONFIG;
+ uint32_t DOMAIN2_PG_CONFIG;
+ uint32_t DOMAIN3_PG_CONFIG;
+ uint32_t DOMAIN4_PG_CONFIG;
+ uint32_t DOMAIN5_PG_CONFIG;
+ uint32_t DOMAIN6_PG_CONFIG;
+ uint32_t DOMAIN7_PG_CONFIG;
+ uint32_t DOMAIN0_PG_STATUS;
+ uint32_t DOMAIN1_PG_STATUS;
+ uint32_t DOMAIN2_PG_STATUS;
+ uint32_t DOMAIN3_PG_STATUS;
+ uint32_t DOMAIN4_PG_STATUS;
+ uint32_t DOMAIN5_PG_STATUS;
+ uint32_t DOMAIN6_PG_STATUS;
+ uint32_t DOMAIN7_PG_STATUS;
+ uint32_t DIO_MEM_PWR_CTRL;
+ uint32_t DCCG_GATE_DISABLE_CNTL;
+ uint32_t DCCG_GATE_DISABLE_CNTL2;
+ uint32_t DCFCLK_CNTL;
+ uint32_t MICROSECOND_TIME_BASE_DIV;
+ uint32_t MILLISECOND_TIME_BASE_DIV;
+ uint32_t DISPCLK_FREQ_CHANGE_CNTL;
+ uint32_t RBBMIF_TIMEOUT_DIS;
+ uint32_t RBBMIF_TIMEOUT_DIS_2;
+ uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t DCHUBBUB_CRC_CTRL;
+ uint32_t DPP_TOP0_DPP_CRC_CTRL;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_R_G;
+ uint32_t DPP_TOP0_DPP_CRC_VAL_B_A;
+ uint32_t MPC_CRC_CTRL;
+ uint32_t MPC_CRC_RESULT_GB;
+ uint32_t MPC_CRC_RESULT_C;
+ uint32_t MPC_CRC_RESULT_AR;
+ uint32_t D1VGA_CONTROL;
+ uint32_t D2VGA_CONTROL;
+ uint32_t D3VGA_CONTROL;
+ uint32_t D4VGA_CONTROL;
+ /* MMHUB registers. read only. temporary hack */
+ uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32;
+ uint32_t VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32;
+ uint32_t VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32;
+ uint32_t VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32;
+ uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;
+ uint32_t MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
+ uint32_t MC_VM_SYSTEM_APERTURE_LOW_ADDR;
+ uint32_t MC_VM_SYSTEM_APERTURE_HIGH_ADDR;
+};
+ /* set field name */
+#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
+ .field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
+
+#define HWS_SF1(blk_name, reg_name, field_name, post_fix)\
+ .field_name = blk_name ## reg_name ## __ ## blk_name ## field_name ## post_fix
+
+
+#define HWSEQ_DCEF_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF(blk, CLOCK_CONTROL, DCFE_CLOCK_ENABLE, mask_sh),\
+ SF(DC_MEM_GLOBAL_PWR_REQ_CNTL, DC_MEM_GLOBAL_PWR_REQ_DIS, mask_sh)
+
+#define HWSEQ_BLND_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_BLND_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(blk, V_UPDATE_LOCK, BLND_V_UPDATE_LOCK_MODE, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_FEEDTHROUGH_EN, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_ALPHA_MODE, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_MODE, mask_sh),\
+ HWS_SF(blk, CONTROL, BLND_MULTIPLIED_MODE, mask_sh)
+
+#define HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF1(blk, PIXEL_RATE_CNTL, PIXEL_RATE_SOURCE, mask_sh),\
+ HWS_SF(blk, PIXEL_RATE_CNTL, DP_DTO0_ENABLE, mask_sh)
+
+#define HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, blk)\
+ HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
+ HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
+
+#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
+ .DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
+ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
+ HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
+ HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
+ SF(DCFEV_CLOCK_CONTROL, DCFEV_CLOCK_ENABLE, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_DCE112_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh),\
+ HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_)
+
+#define HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)\
+ SF(DCHUB_FB_LOCATION, FB_TOP, mask_sh),\
+ SF(DCHUB_FB_LOCATION, FB_BASE, mask_sh),\
+ SF(DCHUB_AGP_BASE, AGP_BASE, mask_sh),\
+ SF(DCHUB_AGP_BOT, AGP_BOT, mask_sh),\
+ SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_DCE12_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE0_DCFE_),\
+ HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
+ HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
+ HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_DCN_MASK_SH_LIST(mask_sh)\
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, OTG0_),\
+ HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh), \
+ HWS_SF(OTG0_, OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR, mask_sh), \
+ HWS_SF(OTG0_, OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_OCCURRED, mask_sh), \
+ HWS_SF(HUBP0_, DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh), \
+ HWS_SF(HUBP0_, HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh), \
+ HWS_SF(DPP_TOP0_, DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \
+ HWS_SF(OPP_PIPE0_, OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, mask_sh),\
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
+ HWS_SF(, DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
+ HWS_SF(, DCFCLK_CNTL, DCFCLK_GATE_DIS, mask_sh)
+
+#define HWSEQ_DCN1_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF1(OTG0_, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_AGP_BASE, SDPIF_AGP_BASE, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_AGP_BOT, SDPIF_AGP_BOT, mask_sh), \
+ HWS_SF(, DCHUBBUB_SDPIF_AGP_TOP, SDPIF_AGP_TOP, mask_sh), \
+ HWS_SF(DPP_TOP0_, DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh), \
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, PAGE_DIRECTORY_ENTRY_HI32, mask_sh),\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, PAGE_DIRECTORY_ENTRY_LO32, mask_sh),\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, LOGICAL_PAGE_NUMBER_HI4, mask_sh),\
+ HWS_SF(, VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, LOGICAL_PAGE_NUMBER_LO32, mask_sh),\
+ HWS_SF(, VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, PHYSICAL_PAGE_ADDR_HI4, mask_sh),\
+ HWS_SF(, VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, PHYSICAL_PAGE_ADDR_LO32, mask_sh),\
+ HWS_SF(, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, PHYSICAL_PAGE_NUMBER_MSB, mask_sh),\
+ HWS_SF(, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, PHYSICAL_PAGE_NUMBER_LSB, mask_sh),\
+ HWS_SF(, MC_VM_SYSTEM_APERTURE_LOW_ADDR, LOGICAL_ADDR, mask_sh),\
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_CONFIG, DOMAIN0_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_CONFIG, DOMAIN1_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_CONFIG, DOMAIN2_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_CONFIG, DOMAIN3_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_CONFIG, DOMAIN4_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_CONFIG, DOMAIN5_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_CONFIG, DOMAIN6_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_CONFIG, DOMAIN7_POWER_GATE, mask_sh), \
+ HWS_SF(, DOMAIN0_PG_STATUS, DOMAIN0_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN1_PG_STATUS, DOMAIN1_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN2_PG_STATUS, DOMAIN2_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN3_PG_STATUS, DOMAIN3_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN4_PG_STATUS, DOMAIN4_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN5_PG_STATUS, DOMAIN5_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
+ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+
+#define HWSEQ_REG_FIELD_LIST(type) \
+ type DCFE_CLOCK_ENABLE; \
+ type DCFEV_CLOCK_ENABLE; \
+ type DC_MEM_GLOBAL_PWR_REQ_DIS; \
+ type BLND_DCP_GRPH_V_UPDATE_LOCK; \
+ type BLND_SCL_V_UPDATE_LOCK; \
+ type BLND_DCP_GRPH_SURF_V_UPDATE_LOCK; \
+ type BLND_BLND_V_UPDATE_LOCK; \
+ type BLND_V_UPDATE_LOCK_MODE; \
+ type BLND_FEEDTHROUGH_EN; \
+ type BLND_ALPHA_MODE; \
+ type BLND_MODE; \
+ type BLND_MULTIPLIED_MODE; \
+ type DP_DTO0_ENABLE; \
+ type PIXEL_RATE_SOURCE; \
+ type PHYPLL_PIXEL_RATE_SOURCE; \
+ type PIXEL_RATE_PLL_SOURCE; \
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ type PAGE_DIRECTORY_ENTRY_HI32;\
+ type PAGE_DIRECTORY_ENTRY_LO32;\
+ type LOGICAL_PAGE_NUMBER_HI4;\
+ type LOGICAL_PAGE_NUMBER_LO32;\
+ type PHYSICAL_PAGE_ADDR_HI4;\
+ type PHYSICAL_PAGE_ADDR_LO32;\
+ type PHYSICAL_PAGE_NUMBER_MSB;\
+ type PHYSICAL_PAGE_NUMBER_LSB;\
+ type LOGICAL_ADDR; \
+ type ENABLE_L1_TLB;\
+ type SYSTEM_ACCESS_MODE;\
+ type LVTMA_BLON;\
+ type LVTMA_PWRSEQ_TARGET_STATE_R;
+
+#define HWSEQ_DCN_REG_FIELD_LIST(type) \
+ type VUPDATE_NO_LOCK_EVENT_CLEAR; \
+ type VUPDATE_NO_LOCK_EVENT_OCCURRED; \
+ type HUBP_VTG_SEL; \
+ type HUBP_CLOCK_ENABLE; \
+ type DPP_CLOCK_ENABLE; \
+ type DPPCLK_RATE_CONTROL; \
+ type SDPIF_FB_TOP;\
+ type SDPIF_FB_BASE;\
+ type SDPIF_FB_OFFSET;\
+ type SDPIF_AGP_BASE;\
+ type SDPIF_AGP_BOT;\
+ type SDPIF_AGP_TOP;\
+ type FB_TOP;\
+ type FB_BASE;\
+ type FB_OFFSET;\
+ type AGP_BASE;\
+ type AGP_BOT;\
+ type AGP_TOP;\
+ type DCHUBBUB_GLOBAL_TIMER_ENABLE; \
+ type DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST;\
+ type DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE;\
+ type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE;\
+ type DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE;\
+ type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE;\
+ type DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE;\
+ type DCHUBBUB_ARB_SAT_LEVEL;\
+ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\
+ type OPP_PIPE_CLOCK_EN;\
+ type IP_REQUEST_EN; \
+ type DOMAIN0_POWER_FORCEON; \
+ type DOMAIN0_POWER_GATE; \
+ type DOMAIN1_POWER_FORCEON; \
+ type DOMAIN1_POWER_GATE; \
+ type DOMAIN2_POWER_FORCEON; \
+ type DOMAIN2_POWER_GATE; \
+ type DOMAIN3_POWER_FORCEON; \
+ type DOMAIN3_POWER_GATE; \
+ type DOMAIN4_POWER_FORCEON; \
+ type DOMAIN4_POWER_GATE; \
+ type DOMAIN5_POWER_FORCEON; \
+ type DOMAIN5_POWER_GATE; \
+ type DOMAIN6_POWER_FORCEON; \
+ type DOMAIN6_POWER_GATE; \
+ type DOMAIN7_POWER_FORCEON; \
+ type DOMAIN7_POWER_GATE; \
+ type DOMAIN0_PGFSM_PWR_STATUS; \
+ type DOMAIN1_PGFSM_PWR_STATUS; \
+ type DOMAIN2_PGFSM_PWR_STATUS; \
+ type DOMAIN3_PGFSM_PWR_STATUS; \
+ type DOMAIN4_PGFSM_PWR_STATUS; \
+ type DOMAIN5_PGFSM_PWR_STATUS; \
+ type DOMAIN6_PGFSM_PWR_STATUS; \
+ type DOMAIN7_PGFSM_PWR_STATUS; \
+ type DCFCLK_GATE_DIS; \
+ type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
+ type DENTIST_DPPCLK_WDIVIDER;
+
+struct dce_hwseq_shift {
+ HWSEQ_REG_FIELD_LIST(uint8_t)
+ HWSEQ_DCN_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_hwseq_mask {
+ HWSEQ_REG_FIELD_LIST(uint32_t)
+ HWSEQ_DCN_REG_FIELD_LIST(uint32_t)
+};
+
+
+enum blnd_mode {
+ BLND_MODE_CURRENT_PIPE = 0,/* Data from current pipe only */
+ BLND_MODE_OTHER_PIPE, /* Data from other pipe only */
+ BLND_MODE_BLENDING,/* Alpha blending - blend 'current' and 'other' */
+};
+
+void dce_enable_fe_clock(struct dce_hwseq *hwss,
+ unsigned int inst, bool enable);
+
+void dce_pipe_control_lock(struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+
+void dce_set_blender_mode(struct dce_hwseq *hws,
+ unsigned int blnd_inst, enum blnd_mode mode);
+
+void dce_clock_gating_power_up(struct dce_hwseq *hws,
+ bool enable);
+
+void dce_crtc_switch_to_clk_src(struct dce_hwseq *hws,
+ struct clock_source *clk_src,
+ unsigned int tg_inst);
+
+bool dce_use_lut(const struct dc_plane_state *plane_state);
+#endif /*__DCE_HWSEQ_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
new file mode 100644
index 000000000000..d618fdd0cc82
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_ipp.h"
+#include "reg_helper.h"
+#include "dm_services.h"
+
+#define REG(reg) \
+ (ipp_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ipp_dce->ipp_shift->field_name, ipp_dce->ipp_mask->field_name
+
+#define CTX \
+ ipp_dce->base.ctx
+
+
+static void dce_ipp_cursor_set_position(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_position *position,
+ const struct dc_cursor_mi_param *param)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+
+ /* lock cursor registers */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, true);
+
+ /* Flag passed in structure differentiates cursor enable/disable. */
+ /* Update if it differs from cached state. */
+ REG_UPDATE(CUR_CONTROL, CURSOR_EN, position->enable);
+
+ REG_SET_2(CUR_POSITION, 0,
+ CURSOR_X_POSITION, position->x,
+ CURSOR_Y_POSITION, position->y);
+
+ REG_SET_2(CUR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, position->x_hotspot,
+ CURSOR_HOT_SPOT_Y, position->y_hotspot);
+
+ /* unlock cursor registers */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, false);
+}
+
+static void dce_ipp_cursor_set_attributes(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_attributes *attributes)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+ int mode;
+
+ /* Lock cursor registers */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, true);
+
+ /* Program cursor control */
+ switch (attributes->color_format) {
+ case CURSOR_MODE_MONO:
+ mode = 0;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ mode = 1;
+ break;
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ mode = 2;
+ break;
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ mode = 3;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* unsupported */
+ mode = 0;
+ }
+
+ REG_UPDATE_3(CUR_CONTROL,
+ CURSOR_MODE, mode,
+ CURSOR_2X_MAGNIFY, attributes->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CUR_INV_TRANS_CLAMP, attributes->attribute_flags.bits.INVERSE_TRANSPARENT_CLAMPING);
+
+ if (attributes->color_format == CURSOR_MODE_MONO) {
+ REG_SET_3(CUR_COLOR1, 0,
+ CUR_COLOR1_BLUE, 0,
+ CUR_COLOR1_GREEN, 0,
+ CUR_COLOR1_RED, 0);
+
+ REG_SET_3(CUR_COLOR2, 0,
+ CUR_COLOR2_BLUE, 0xff,
+ CUR_COLOR2_GREEN, 0xff,
+ CUR_COLOR2_RED, 0xff);
+ }
+
+ /*
+ * Program cursor size -- NOTE: HW spec specifies that HW register
+ * stores size as (height - 1, width - 1)
+ */
+ REG_SET_2(CUR_SIZE, 0,
+ CURSOR_WIDTH, attributes->width - 1,
+ CURSOR_HEIGHT, attributes->height - 1);
+
+ /* Program cursor surface address */
+ /* SURFACE_ADDRESS_HIGH: Higher order bits (39:32) of hardware cursor
+ * surface base address in byte. It is 4K byte aligned.
+ * The correct way to program cursor surface address is to first write
+ * to CUR_SURFACE_ADDRESS_HIGH, and then write to CUR_SURFACE_ADDRESS
+ */
+ REG_SET(CUR_SURFACE_ADDRESS_HIGH, 0,
+ CURSOR_SURFACE_ADDRESS_HIGH, attributes->address.high_part);
+
+ REG_SET(CUR_SURFACE_ADDRESS, 0,
+ CURSOR_SURFACE_ADDRESS, attributes->address.low_part);
+
+ /* Unlock Cursor registers. */
+ REG_UPDATE(CUR_UPDATE, CURSOR_UPDATE_LOCK, false);
+}
+
+
+static void dce_ipp_program_prescale(
+ struct input_pixel_processor *ipp,
+ struct ipp_prescale_params *params)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+
+ /* set to bypass mode first before change */
+ REG_UPDATE(PRESCALE_GRPH_CONTROL,
+ GRPH_PRESCALE_BYPASS,
+ 1);
+
+ REG_SET_2(PRESCALE_VALUES_GRPH_R, 0,
+ GRPH_PRESCALE_SCALE_R, params->scale,
+ GRPH_PRESCALE_BIAS_R, params->bias);
+
+ REG_SET_2(PRESCALE_VALUES_GRPH_G, 0,
+ GRPH_PRESCALE_SCALE_G, params->scale,
+ GRPH_PRESCALE_BIAS_G, params->bias);
+
+ REG_SET_2(PRESCALE_VALUES_GRPH_B, 0,
+ GRPH_PRESCALE_SCALE_B, params->scale,
+ GRPH_PRESCALE_BIAS_B, params->bias);
+
+ if (params->mode != IPP_PRESCALE_MODE_BYPASS) {
+ REG_UPDATE(PRESCALE_GRPH_CONTROL,
+ GRPH_PRESCALE_BYPASS, 0);
+
+ /* If prescale is in use, then legacy lut should be bypassed */
+ REG_UPDATE(INPUT_GAMMA_CONTROL,
+ GRPH_INPUT_GAMMA_MODE, 1);
+ }
+}
+
+static void dce_ipp_program_input_lut(
+ struct input_pixel_processor *ipp,
+ const struct dc_gamma *gamma)
+{
+ int i;
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+
+ /* power on LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_SET(DCFE_MEM_PWR_CTRL, 0, DCP_LUT_MEM_PWR_DIS, 1);
+
+ /* enable all */
+ REG_SET(DC_LUT_WRITE_EN_MASK, 0, DC_LUT_WRITE_EN_MASK, 0x7);
+
+ /* 256 entry mode */
+ REG_UPDATE(DC_LUT_RW_MODE, DC_LUT_RW_MODE, 0);
+
+ /* LUT-256, unsigned, integer, new u0.12 format */
+ REG_SET_3(DC_LUT_CONTROL, 0,
+ DC_LUT_DATA_R_FORMAT, 3,
+ DC_LUT_DATA_G_FORMAT, 3,
+ DC_LUT_DATA_B_FORMAT, 3);
+
+ /* start from index 0 */
+ REG_SET(DC_LUT_RW_INDEX, 0,
+ DC_LUT_RW_INDEX, 0);
+
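+ /* Each gamma entry is written as three sequential R, G, B writes
+ * to the LUT sequential color port.
+ */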
+ for (i = 0; i < gamma->num_entries; i++) {
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.red[i]));
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.green[i]));
+ REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.blue[i]));
+ }
+
+ /* power off LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_SET(DCFE_MEM_PWR_CTRL, 0, DCP_LUT_MEM_PWR_DIS, 0);
+
+ /* bypass prescale, enable legacy LUT */
+ REG_UPDATE(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
+ REG_UPDATE(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
+}
+
+static void dce_ipp_set_degamma(
+ struct input_pixel_processor *ipp,
+ enum ipp_degamma_mode mode)
+{
+ struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp);
+ uint32_t degamma_type = (mode == IPP_DEGAMMA_MODE_HW_sRGB) ? 1 : 0;
+
+ ASSERT(mode == IPP_DEGAMMA_MODE_BYPASS ||
+ mode == IPP_DEGAMMA_MODE_HW_sRGB);
+
+ REG_SET_3(DEGAMMA_CONTROL, 0,
+ GRPH_DEGAMMA_MODE, degamma_type,
+ CURSOR_DEGAMMA_MODE, degamma_type,
+ CURSOR2_DEGAMMA_MODE, degamma_type);
+}
+
+static const struct ipp_funcs dce_ipp_funcs = {
+ .ipp_cursor_set_attributes = dce_ipp_cursor_set_attributes,
+ .ipp_cursor_set_position = dce_ipp_cursor_set_position,
+ .ipp_program_prescale = dce_ipp_program_prescale,
+ .ipp_program_input_lut = dce_ipp_program_input_lut,
+ .ipp_set_degamma = dce_ipp_set_degamma
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dce_ipp_construct(
+ struct dce_ipp *ipp_dce,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_ipp_registers *regs,
+ const struct dce_ipp_shift *ipp_shift,
+ const struct dce_ipp_mask *ipp_mask)
+{
+ ipp_dce->base.ctx = ctx;
+ ipp_dce->base.inst = inst;
+ ipp_dce->base.funcs = &dce_ipp_funcs;
+
+ ipp_dce->regs = regs;
+ ipp_dce->ipp_shift = ipp_shift;
+ ipp_dce->ipp_mask = ipp_mask;
+}
+
+void dce_ipp_destroy(struct input_pixel_processor **ipp)
+{
+ kfree(TO_DCE_IPP(*ipp));
+ *ipp = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h
new file mode 100644
index 000000000000..ca04e97d44c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCE_IPP_H_
+#define _DCE_IPP_H_
+
+#include "ipp.h"
+
+#define TO_DCE_IPP(ipp)\
+ container_of(ipp, struct dce_ipp, base)
+
+#define IPP_COMMON_REG_LIST_DCE_BASE(id) \
+ SRI(CUR_UPDATE, DCP, id), \
+ SRI(CUR_CONTROL, DCP, id), \
+ SRI(CUR_POSITION, DCP, id), \
+ SRI(CUR_HOT_SPOT, DCP, id), \
+ SRI(CUR_COLOR1, DCP, id), \
+ SRI(CUR_COLOR2, DCP, id), \
+ SRI(CUR_SIZE, DCP, id), \
+ SRI(CUR_SURFACE_ADDRESS_HIGH, DCP, id), \
+ SRI(CUR_SURFACE_ADDRESS, DCP, id), \
+ SRI(PRESCALE_GRPH_CONTROL, DCP, id), \
+ SRI(PRESCALE_VALUES_GRPH_R, DCP, id), \
+ SRI(PRESCALE_VALUES_GRPH_G, DCP, id), \
+ SRI(PRESCALE_VALUES_GRPH_B, DCP, id), \
+ SRI(INPUT_GAMMA_CONTROL, DCP, id), \
+ SRI(DC_LUT_WRITE_EN_MASK, DCP, id), \
+ SRI(DC_LUT_RW_MODE, DCP, id), \
+ SRI(DC_LUT_CONTROL, DCP, id), \
+ SRI(DC_LUT_RW_INDEX, DCP, id), \
+ SRI(DC_LUT_SEQ_COLOR, DCP, id), \
+ SRI(DEGAMMA_CONTROL, DCP, id)
+
+#define IPP_DCE100_REG_LIST_DCE_BASE(id) \
+ IPP_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, CRTC, id)
+
+#define IPP_DCE110_REG_LIST_DCE_BASE(id) \
+ IPP_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, DCFE, id)
+
+#define IPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ IPP_SF(CUR_UPDATE, CURSOR_UPDATE_LOCK, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_EN, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(CUR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(CUR_CONTROL, CUR_INV_TRANS_CLAMP, mask_sh), \
+ IPP_SF(CUR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(CUR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(CUR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_BLUE, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_GREEN, mask_sh), \
+ IPP_SF(CUR_COLOR1, CUR_COLOR1_RED, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_BLUE, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_GREEN, mask_sh), \
+ IPP_SF(CUR_COLOR2, CUR_COLOR2_RED, mask_sh), \
+ IPP_SF(CUR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(CUR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(CUR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(CUR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_SCALE_R, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_BIAS_R, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_SCALE_G, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_BIAS_G, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_SCALE_B, mask_sh), \
+ IPP_SF(PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_BIAS_B, mask_sh), \
+ IPP_SF(INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, mask_sh), \
+ IPP_SF(DC_LUT_WRITE_EN_MASK, DC_LUT_WRITE_EN_MASK, mask_sh), \
+ IPP_SF(DC_LUT_RW_MODE, DC_LUT_RW_MODE, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_R_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_G_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_CONTROL, DC_LUT_DATA_B_FORMAT, mask_sh), \
+ IPP_SF(DC_LUT_RW_INDEX, DC_LUT_RW_INDEX, mask_sh), \
+ IPP_SF(DC_LUT_SEQ_COLOR, DC_LUT_SEQ_COLOR, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, mask_sh)
+
+#define IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ IPP_SF(DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh)
+
+#define IPP_DCE120_MASK_SH_LIST_SOC_BASE(mask_sh) \
+ IPP_SF(DCP0_CUR_UPDATE, CURSOR_UPDATE_LOCK, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CURSOR_EN, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(DCP0_CUR_CONTROL, CUR_INV_TRANS_CLAMP, mask_sh), \
+ IPP_SF(DCP0_CUR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(DCP0_CUR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(DCP0_CUR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(DCP0_CUR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR1, CUR_COLOR1_BLUE, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR1, CUR_COLOR1_GREEN, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR1, CUR_COLOR1_RED, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR2, CUR_COLOR2_BLUE, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR2, CUR_COLOR2_GREEN, mask_sh), \
+ IPP_SF(DCP0_CUR_COLOR2, CUR_COLOR2_RED, mask_sh), \
+ IPP_SF(DCP0_CUR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(DCP0_CUR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(DCP0_CUR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(DCP0_CUR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_SCALE_R, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_R, GRPH_PRESCALE_BIAS_R, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_SCALE_G, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_G, GRPH_PRESCALE_BIAS_G, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_SCALE_B, mask_sh), \
+ IPP_SF(DCP0_PRESCALE_VALUES_GRPH_B, GRPH_PRESCALE_BIAS_B, mask_sh), \
+ IPP_SF(DCP0_INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, mask_sh), \
+ IPP_SF(DCFE0_DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_WRITE_EN_MASK, DC_LUT_WRITE_EN_MASK, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_RW_MODE, DC_LUT_RW_MODE, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_CONTROL, DC_LUT_DATA_R_FORMAT, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_CONTROL, DC_LUT_DATA_G_FORMAT, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_CONTROL, DC_LUT_DATA_B_FORMAT, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_RW_INDEX, DC_LUT_RW_INDEX, mask_sh), \
+ IPP_SF(DCP0_DC_LUT_SEQ_COLOR, DC_LUT_SEQ_COLOR, mask_sh), \
+ IPP_SF(DCP0_DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, mask_sh), \
+ IPP_SF(DCP0_DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, mask_sh)
+
+#define IPP_REG_FIELD_LIST(type) \
+ type CURSOR_UPDATE_LOCK; \
+ type CURSOR_EN; \
+ type CURSOR_X_POSITION; \
+ type CURSOR_Y_POSITION; \
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_MODE; \
+ type CURSOR_2X_MAGNIFY; \
+ type CUR_INV_TRANS_CLAMP; \
+ type CUR_COLOR1_BLUE; \
+ type CUR_COLOR1_GREEN; \
+ type CUR_COLOR1_RED; \
+ type CUR_COLOR2_BLUE; \
+ type CUR_COLOR2_GREEN; \
+ type CUR_COLOR2_RED; \
+ type CURSOR_WIDTH; \
+ type CURSOR_HEIGHT; \
+ type CURSOR_SURFACE_ADDRESS_HIGH; \
+ type CURSOR_SURFACE_ADDRESS; \
+ type GRPH_PRESCALE_BYPASS; \
+ type GRPH_PRESCALE_SCALE_R; \
+ type GRPH_PRESCALE_BIAS_R; \
+ type GRPH_PRESCALE_SCALE_G; \
+ type GRPH_PRESCALE_BIAS_G; \
+ type GRPH_PRESCALE_SCALE_B; \
+ type GRPH_PRESCALE_BIAS_B; \
+ type GRPH_INPUT_GAMMA_MODE; \
+ type DCP_LUT_MEM_PWR_DIS; \
+ type DC_LUT_WRITE_EN_MASK; \
+ type DC_LUT_RW_MODE; \
+ type DC_LUT_DATA_R_FORMAT; \
+ type DC_LUT_DATA_G_FORMAT; \
+ type DC_LUT_DATA_B_FORMAT; \
+ type DC_LUT_RW_INDEX; \
+ type DC_LUT_SEQ_COLOR; \
+ type GRPH_DEGAMMA_MODE; \
+ type CURSOR_DEGAMMA_MODE; \
+ type CURSOR2_DEGAMMA_MODE
+
+struct dce_ipp_shift {
+ IPP_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_ipp_mask {
+ IPP_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_ipp_registers {
+ uint32_t CUR_UPDATE;
+ uint32_t CUR_CONTROL;
+ uint32_t CUR_POSITION;
+ uint32_t CUR_HOT_SPOT;
+ uint32_t CUR_COLOR1;
+ uint32_t CUR_COLOR2;
+ uint32_t CUR_SIZE;
+ uint32_t CUR_SURFACE_ADDRESS_HIGH;
+ uint32_t CUR_SURFACE_ADDRESS;
+ uint32_t PRESCALE_GRPH_CONTROL;
+ uint32_t PRESCALE_VALUES_GRPH_R;
+ uint32_t PRESCALE_VALUES_GRPH_G;
+ uint32_t PRESCALE_VALUES_GRPH_B;
+ uint32_t INPUT_GAMMA_CONTROL;
+ uint32_t DCFE_MEM_PWR_CTRL;
+ uint32_t DC_LUT_WRITE_EN_MASK;
+ uint32_t DC_LUT_RW_MODE;
+ uint32_t DC_LUT_CONTROL;
+ uint32_t DC_LUT_RW_INDEX;
+ uint32_t DC_LUT_SEQ_COLOR;
+ uint32_t DEGAMMA_CONTROL;
+};
+
+struct dce_ipp {
+ struct input_pixel_processor base;
+ const struct dce_ipp_registers *regs;
+ const struct dce_ipp_shift *ipp_shift;
+ const struct dce_ipp_mask *ipp_mask;
+};
+
+void dce_ipp_construct(struct dce_ipp *ipp_dce,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_ipp_registers *regs,
+ const struct dce_ipp_shift *ipp_shift,
+ const struct dce_ipp_mask *ipp_mask);
+
+void dce_ipp_destroy(struct input_pixel_processor **ipp);
+
+#endif /* _DCE_IPP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
new file mode 100644
index 000000000000..fe88852b4774
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -0,0 +1,1379 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dce_link_encoder.h"
+#include "stream_encoder.h"
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+
+#ifndef DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE__SHIFT
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE__SHIFT 0xa
+#endif
+
+#ifndef DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE_MASK
+#define DMU_MEM_PWR_CNTL__DMCU_IRAM_MEM_PWR_STATE_MASK 0x00000400L
+#endif
+
+#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+#endif
+
+#ifndef HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#endif
+
+#define CTX \
+ enc110->base.ctx
+
+#define REG(reg)\
+ (enc110->link_regs->reg)
+
+#define AUX_REG(reg)\
+ (enc110->aux_regs->reg)
+
+#define HPD_REG(reg)\
+ (enc110->hpd_regs->reg)
+
+#define DEFAULT_AUX_MAX_DATA_SIZE 16
+#define AUX_MAX_DEFER_WRITE_RETRY 20
+/*
+ * @brief
+ * Trigger Source Select
+ * ASIC-dependent, actual values for register programming
+ */
+#define DCE110_DIG_FE_SOURCE_SELECT_INVALID 0x0
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGA 0x1
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGB 0x2
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGC 0x4
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGD 0x08
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGE 0x10
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
+#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40
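+/* These values are one-hot bit masks; dce110_link_encoder_connect_dig_be_to_fe()
+ * ORs them into, or clears them from, the DIG_FE_SOURCE_SELECT field.
+ */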
+
+/* Minimum TMDS pixel clock, in kHz (25.00 MHz) */
+#define TMDS_MIN_PIXEL_CLOCK 25000
+/* Maximum TMDS pixel clock, in kHz (165.00 MHz) */
+#define TMDS_MAX_PIXEL_CLOCK 165000
+/* Maximum encoder clock for current ASICs: 600 MHz */
+#define MAX_ENCODER_CLOCK 600000
+
+enum {
+ DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define DIG_REG(reg)\
+ (reg + enc110->offsets.dig)
+
+#define DP_REG(reg)\
+ (reg + enc110->offsets.dp)
+
+static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
+ .validate_output_with_stream =
+ dce110_link_encoder_validate_output_with_stream,
+ .hw_init = dce110_link_encoder_hw_init,
+ .setup = dce110_link_encoder_setup,
+ .enable_tmds_output = dce110_link_encoder_enable_tmds_output,
+ .enable_dp_output = dce110_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dce110_link_encoder_enable_dp_mst_output,
+ .disable_output = dce110_link_encoder_disable_output,
+ .dp_set_lane_settings = dce110_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dce110_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dce110_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dce110_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dce110_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dce110_link_encoder_enable_hpd,
+ .disable_hpd = dce110_link_encoder_disable_hpd,
+ .destroy = dce110_link_encoder_destroy
+};
+
+static enum bp_result link_transmitter_control(
+ struct dce110_link_encoder *enc110,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result;
+ struct dc_bios *bp = enc110->base.ctx->dc_bios;
+
+ result = bp->funcs->transmitter_control(bp, cntl);
+
+ return result;
+}
+
+static void enable_phy_bypass_mode(
+ struct dce110_link_encoder *enc110,
+ bool enable)
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable);
+
+}
+
+static void disable_prbs_symbols(
+ struct dce110_link_encoder *enc110,
+ bool disable)
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_UPDATE_4(DP_DPHY_CNTL,
+ DPHY_ATEST_SEL_LANE0, disable,
+ DPHY_ATEST_SEL_LANE1, disable,
+ DPHY_ATEST_SEL_LANE2, disable,
+ DPHY_ATEST_SEL_LANE3, disable);
+}
+
+static void disable_prbs_mode(
+ struct dce110_link_encoder *enc110)
+{
+ REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0);
+}
+
+static void program_pattern_symbols(
+ struct dce110_link_encoder *enc110,
+ uint16_t pattern_symbols[8])
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_SET_3(DP_DPHY_SYM0, 0,
+ DPHY_SYM1, pattern_symbols[0],
+ DPHY_SYM2, pattern_symbols[1],
+ DPHY_SYM3, pattern_symbols[2]);
+
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_SET_3(DP_DPHY_SYM1, 0,
+ DPHY_SYM4, pattern_symbols[3],
+ DPHY_SYM5, pattern_symbols[4],
+ DPHY_SYM6, pattern_symbols[5]);
+
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_SET_2(DP_DPHY_SYM2, 0,
+ DPHY_SYM7, pattern_symbols[6],
+ DPHY_SYM8, pattern_symbols[7]);
+}
+
+static void set_dp_phy_pattern_d102(
+ struct dce110_link_encoder *enc110)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* For 10-bit PRBS or debug symbols,
+ * use the following sequence: */
+
+ /* Enable debug symbols on the lanes */
+
+ disable_prbs_symbols(enc110, true);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+
+ /* Program debug symbols to be output */
+ {
+ uint16_t pattern_symbols[8] = {
+ 0x2AA, 0x2AA, 0x2AA, 0x2AA,
+ 0x2AA, 0x2AA, 0x2AA, 0x2AA
+ };
+
+ program_pattern_symbols(enc110, pattern_symbols);
+ }
+
+ /* Enable phy bypass mode to enable the test pattern */
+
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_link_training_complete(
+ struct dce110_link_encoder *enc110,
+ bool complete)
+{
+ /* This register resides in DP back end block;
+ * transmitter is used for the offset */
+
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete);
+
+}
+
+void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
+ struct link_encoder *enc,
+ uint32_t index)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ /* Write Training Pattern */
+
+ REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index);
+
+ /* Set HW Register Training Complete to false */
+
+ set_link_training_complete(enc110, false);
+
+ /* Disable PHY Bypass mode to output Training Pattern */
+
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+}
+
+static void setup_panel_mode(
+ struct dce110_link_encoder *enc110,
+ enum dp_panel_mode panel_mode)
+{
+ uint32_t value;
+
+ ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
+ value = REG_READ(DP_DPHY_INTERNAL_CTRL);
+
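+ /* DP_DPHY_INTERNAL_CTRL selects the panel mode: eDP and the special
+ * panel mode get dedicated encodings below; everything else falls
+ * back to the default DP value of 0.
+ */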
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ value = 0x1;
+ break;
+ case DP_PANEL_MODE_SPECIAL:
+ value = 0x11;
+ break;
+ default:
+ value = 0x0;
+ break;
+ }
+
+ REG_WRITE(DP_DPHY_INTERNAL_CTRL, value);
+}
+
+static void set_dp_phy_pattern_symbol_error(
+ struct dce110_link_encoder *enc110)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* program correct panel mode */
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+
+ /* A PRBS23 pattern is used for most DP electrical measurements. */
+
+ /* Enable PRBS symbols on the lanes */
+ disable_prbs_symbols(enc110, false);
+
+ /* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */
+ REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+ DPHY_PRBS_SEL, 1,
+ DPHY_PRBS_EN, 1);
+
+ /* Enable phy bypass mode to enable the test pattern */
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_prbs7(
+ struct dce110_link_encoder *enc110)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* A PRBS7 pattern is used for most DP electrical measurements. */
+
+ /* Enable PRBS symbols on the lanes */
+ disable_prbs_symbols(enc110, false);
+
+ /* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */
+ REG_UPDATE_2(DP_DPHY_PRBS_CNTL,
+ DPHY_PRBS_SEL, 0,
+ DPHY_PRBS_EN, 1);
+
+ /* Enable phy bypass mode to enable the test pattern */
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_80bit_custom(
+ struct dce110_link_encoder *enc110,
+ const uint8_t *pattern)
+{
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Enable debug symbols on the lanes */
+
+ disable_prbs_symbols(enc110, true);
+
+ /* Enable PHY bypass mode to enable the test pattern */
+ /* TODO is it really needed ? */
+
+ enable_phy_bypass_mode(enc110, true);
+
+ /* Program 80 bit custom pattern */
+ {
+ uint16_t pattern_symbols[8];
+
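+ /* Repack the 10-byte (80-bit) custom pattern into eight 10-bit
+ * symbols, LSB first: symbol[n] carries bits [10*n + 9 : 10*n]
+ * of the byte stream.
+ */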
+ pattern_symbols[0] =
+ ((pattern[1] & 0x03) << 8) | pattern[0];
+ pattern_symbols[1] =
+ ((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f);
+ pattern_symbols[2] =
+ ((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f);
+ pattern_symbols[3] =
+ (pattern[4] << 2) | ((pattern[3] >> 6) & 0x03);
+ pattern_symbols[4] =
+ ((pattern[6] & 0x03) << 8) | pattern[5];
+ pattern_symbols[5] =
+ ((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f);
+ pattern_symbols[6] =
+ ((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f);
+ pattern_symbols[7] =
+ (pattern[9] << 2) | ((pattern[8] >> 6) & 0x03);
+
+ program_pattern_symbols(enc110, pattern_symbols);
+ }
+
+ /* Enable phy bypass mode to enable the test pattern */
+
+ enable_phy_bypass_mode(enc110, true);
+}
+
+static void set_dp_phy_pattern_hbr2_compliance_cp2520_2(
+ struct dce110_link_encoder *enc110,
+ unsigned int cp2520_pattern)
+{
+
+ /* Previously there was a register, DP_HBR2_EYE_PATTERN, that was
+ * enabled to generate this pattern, but it does not work with the
+ * latest spec change, so we program the following registers manually.
+ *
+ * The following settings have been confirmed
+ * by Nick Chorney and Sandra Liu */
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Setup DIG encoder in DP SST mode */
+ enc110->base.funcs->setup(&enc110->base, SIGNAL_TYPE_DISPLAY_PORT);
+
+ /* ensure normal panel mode. */
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+
+ /* no VBID after BS (SR)
+ * DP_LINK_FRAMING_CNTL change history (Sandra Liu):
+ * 11000260 / 11000104 / 110000FC */
+ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+ DP_IDLE_BS_INTERVAL, 0xFC,
+ DP_VBID_DISABLE, 1,
+ DP_VID_ENHANCED_FRAME_MODE, 1);
+
+ /* swap every BS with SR */
+ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0);
+
+ /* select cp2520 patterns */
+ if (REG(DP_DPHY_HBR2_PATTERN_CONTROL))
+ REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL,
+ DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern);
+ else
+ /* pre-DCE11 can only generate CP2520 pattern 2 */
+ ASSERT(cp2520_pattern == 2);
+
+ /* set link training complete */
+ set_link_training_complete(enc110, true);
+
+ /* disable video stream */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+}
+
+static void set_dp_phy_pattern_passthrough_mode(
+ struct dce110_link_encoder *enc110,
+ enum dp_panel_mode panel_mode)
+{
+ /* program correct panel mode */
+ setup_panel_mode(enc110, panel_mode);
+
+ /* restore LINK_FRAMING_CNTL and DPHY_SCRAMBLER_BS_COUNT
+ * in case we were doing HBR2 compliance pattern before
+ */
+ REG_UPDATE_3(DP_LINK_FRAMING_CNTL,
+ DP_IDLE_BS_INTERVAL, 0x2000,
+ DP_VBID_DISABLE, 0,
+ DP_VID_ENHANCED_FRAME_MODE, 1);
+
+ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF);
+
+ /* set link training complete */
+ set_link_training_complete(enc110, true);
+
+ /* Disable PHY Bypass mode to setup the test pattern */
+ enable_phy_bypass_mode(enc110, false);
+
+ /* Disable PRBS mode */
+ disable_prbs_mode(enc110);
+}
+
+/* return value is bit-vector */
+static uint8_t get_frontend_source(
+ enum engine_id engine)
+{
+ switch (engine) {
+ case ENGINE_ID_DIGA:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGA;
+ case ENGINE_ID_DIGB:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGB;
+ case ENGINE_ID_DIGC:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGC;
+ case ENGINE_ID_DIGD:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGD;
+ case ENGINE_ID_DIGE:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGE;
+ case ENGINE_ID_DIGF:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGF;
+ case ENGINE_ID_DIGG:
+ return DCE110_DIG_FE_SOURCE_SELECT_DIGG;
+ default:
+ ASSERT_CRITICAL(false);
+ return DCE110_DIG_FE_SOURCE_SELECT_INVALID;
+ }
+}
+
+static void configure_encoder(
+ struct dce110_link_encoder *enc110,
+ const struct dc_link_settings *link_settings)
+{
+ /* set number of lanes */
+
+ REG_SET(DP_CONFIG, 0,
+ DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE);
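+ /* DP_UDI_LANES is programmed relative to LANE_COUNT_ONE,
+ * i.e. as a zero-based lane count. */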
+
+ /* setup scrambler */
+ REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1);
+}
+
+static void aux_initialize(
+ struct dce110_link_encoder *enc110)
+{
+ struct dc_context *ctx = enc110->base.ctx;
+ enum hpd_source_id hpd_source = enc110->base.hpd_source;
+ uint32_t addr = AUX_REG(AUX_CONTROL);
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(value, hpd_source, AUX_CONTROL, AUX_HPD_SEL);
+ set_reg_field_value(value, 0, AUX_CONTROL, AUX_LS_READ_EN);
+ dm_write_reg(ctx, addr, value);
+
+ addr = AUX_REG(AUX_DPHY_RX_CONTROL0);
+ value = dm_read_reg(ctx, addr);
+
+ /* 1/4 window (the maximum allowed) */
+ set_reg_field_value(value, 1,
+ AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW);
+ dm_write_reg(ctx, addr, value);
+
+}
+
+void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
+ bool exit_link_training_required)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ if (exit_link_training_required)
+ REG_UPDATE(DP_DPHY_FAST_TRAINING,
+ DPHY_RX_FAST_TRAINING_CAPABLE, 1);
+ else {
+ REG_UPDATE(DP_DPHY_FAST_TRAINING,
+ DPHY_RX_FAST_TRAINING_CAPABLE, 0);
+ /* In DCE 11 we can pre-program a Force SR register so that an SR
+ * symbol is triggered after 5 idle patterns have been transmitted.
+ * Upon PSR exit, the DMCU can set DPHY_LOAD_BS_COUNT_START = 1;
+ * once DPHY_LOAD_BS_COUNT_START is written and the internal counter
+ * reaches DPHY_LOAD_BS_COUNT, the next BS symbol is replaced by an
+ * SR symbol once.
+ */
+
+ REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5);
+ }
+}
+
+void dce110_psr_program_secondary_packet(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ REG_UPDATE_2(DP_SEC_CNTL1,
+ DP_SEC_GSP0_LINE_NUM, sdp_transmit_line_num_deadline,
+ DP_SEC_GSP0_PRIORITY, 1);
+}
+
+static bool is_dig_enabled(const struct dce110_link_encoder *enc110)
+{
+ uint32_t value;
+
+ REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);
+ return value;
+}
+
+static void link_encoder_disable(struct dce110_link_encoder *enc110)
+{
+ /* reset training pattern */
+ REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0,
+ DPHY_TRAINING_PATTERN_SEL, 0);
+
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+
+ /* reset panel mode */
+ setup_panel_mode(enc110, DP_PANEL_MODE_DEFAULT);
+}
+
+static void hpd_initialize(
+ struct dce110_link_encoder *enc110)
+{
+ /* Associate HPD with DIG_BE */
+ enum hpd_source_id hpd_source = enc110->base.hpd_source;
+
+ REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source);
+}
+
+bool dce110_link_encoder_validate_dvi_output(
+ const struct dce110_link_encoder *enc110,
+ enum signal_type connector_signal,
+ enum signal_type signal,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK;
+
+ if (signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ max_pixel_clock *= 2;
+
+ /* This handles the case of an HDMI downgrade to DVI: we don't want
+ * to cap the pixel clock if the DDI is not DVI.
+ */
+ if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK &&
+ connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
+ max_pixel_clock = enc110->base.features.max_hdmi_pixel_clock;
+
+ /* DVI only supports RGB pixel encoding */
+ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+ return false;
+
+ /* connect DVI via the adapter's HDMI connector */
+ if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) &&
+ signal != SIGNAL_TYPE_HDMI_TYPE_A &&
+ crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK)
+ return false;
+ if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+ return false;
+
+ if (crtc_timing->pix_clk_khz > max_pixel_clock)
+ return false;
+
+ /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ break;
+ case COLOR_DEPTH_101010:
+ case COLOR_DEPTH_161616:
+ if (signal != SIGNAL_TYPE_DVI_DUAL_LINK)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool dce110_link_encoder_validate_hdmi_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing,
+ int adjusted_pix_clk_khz)
+{
+ enum dc_color_depth max_deep_color =
+ enc110->base.features.max_hdmi_deep_color;
+
+ if (max_deep_color < crtc_timing->display_color_depth)
+ return false;
+
+ if (crtc_timing->display_color_depth < COLOR_DEPTH_888)
+ return false;
+ if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK)
+ return false;
+
+ if ((adjusted_pix_clk_khz == 0) ||
+ (adjusted_pix_clk_khz > enc110->base.features.max_hdmi_pixel_clock))
+ return false;
+
+ /* DCE11 HW does not support 420 */
+ if (!enc110->base.features.ycbcr420_supported &&
+ crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ return false;
+
+ if (!enc110->base.features.flags.bits.HDMI_6GB_EN &&
+ adjusted_pix_clk_khz >= 300000)
+ return false;
+ return true;
+}
+
+bool dce110_link_encoder_validate_dp_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ /* default RGB only */
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ return true;
+
+ if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE)
+ return true;
+
+ /* For the DP Y-only feature on DCE 8.x or later we need the ASIC cap
+ * plus FeatureSupportDPYonly; COLOR_DEPTH_666 is not supported. */
+ if (crtc_timing->flags.Y_ONLY &&
+ enc110->base.features.flags.bits.IS_YCBCR_CAPABLE &&
+ crtc_timing->display_color_depth != COLOR_DEPTH_666)
+ return true;
+
+ return false;
+}
+
+void dce110_link_encoder_construct(
+ struct dce110_link_encoder *enc110,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dce110_link_enc_registers *link_regs,
+ const struct dce110_link_enc_aux_registers *aux_regs,
+ const struct dce110_link_enc_hpd_registers *hpd_regs)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+
+ enc110->base.funcs = &dce110_lnk_enc_funcs;
+ enc110->base.ctx = init_data->ctx;
+ enc110->base.id = init_data->encoder;
+
+ enc110->base.hpd_source = init_data->hpd_source;
+ enc110->base.connector = init_data->connector;
+
+ enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc110->base.features = *enc_features;
+
+ enc110->base.transmitter = init_data->transmitter;
+
+ /* set the flag to indicate whether the driver polls the I2C data pin
+ * while doing the DP sink detect
+ */
+
+/* if (dal_adapter_service_is_feature_supported(as,
+ FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ enc110->base.features.flags.bits.
+ DP_SINK_DETECT_POLL_DATA_PIN = true;*/
+
+ enc110->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+ /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+ * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY,
+ * and assigns DIG_FE to non-MST UNIPHY first and MST last, so the
+ * preferred DIG is per UNIPHY and is used by SST DP, eDP, HDMI, DVI
+ * and LVDS. The preferred DIG assignment is decided by board design.
+ * For DCE 8.0 there are at most 6 UNIPHYs; we assume board design and
+ * VBIOS will filter out the 7th UNIPHY for DCE 8.0, so adding DIGG
+ * should not hurt DCE 8.0. This lets DCE 8.1 share the DCE 8.0 code
+ * as much as possible.
+ */
+
+ enc110->link_regs = link_regs;
+ enc110->aux_regs = aux_regs;
+ enc110->hpd_regs = hpd_regs;
+
+ switch (enc110->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc110->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc110->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ enc110->base.preferred_engine = ENGINE_ID_DIGC;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ enc110->base.preferred_engine = ENGINE_ID_DIGD;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ enc110->base.preferred_engine = ENGINE_ID_DIGE;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ enc110->base.preferred_engine = ENGINE_ID_DIGF;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ enc110->base.preferred_engine = ENGINE_ID_DIGG;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* Override features with DCE-specific values */
+ if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
+ enc110->base.ctx->dc_bios, enc110->base.id,
+ &bp_cap_info)) {
+ enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ }
+}
+
+bool dce110_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ bool is_valid;
+
+ switch (stream->signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ is_valid = dce110_link_encoder_validate_dvi_output(
+ enc110,
+ stream->sink->link->connector_signal,
+ stream->signal,
+ &stream->timing);
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ is_valid = dce110_link_encoder_validate_hdmi_output(
+ enc110,
+ &stream->timing,
+ stream->phy_pix_clk);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ is_valid = dce110_link_encoder_validate_dp_output(
+ enc110, &stream->timing);
+ break;
+ case SIGNAL_TYPE_EDP:
+ is_valid =
+ (stream->timing.
+ pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+ break;
+ case SIGNAL_TYPE_VIRTUAL:
+ is_valid = true;
+ break;
+ default:
+ is_valid = false;
+ break;
+ }
+
+ return is_valid;
+}
+
+void dce110_link_encoder_hw_init(
+ struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ cntl.action = TRANSMITTER_CONTROL_INIT;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.connector_obj_id = enc110->base.connector;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.coherent = false;
+ cntl.hpd_sel = enc110->base.hpd_source;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (enc110->base.connector.id == CONNECTOR_ID_LVDS) {
+ cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ ASSERT(result == BP_RESULT_OK);
+
+ } else if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
+ ctx->dc->hwss.edp_power_control(enc, true);
+ }
+ aux_initialize(enc110);
+
+ /* Reinitialize HPD.
+ * hpd_initialize() passes the DIG_FE id to the HW context.
+ * All other routines within the HW context use fe_engine_offset as
+ * the DIG_FE id, even when the caller passes a DIG_FE id,
+ * so this routine must be called first. */
+ hpd_initialize(enc110);
+}
+
+void dce110_link_encoder_destroy(struct link_encoder **enc)
+{
+ kfree(TO_DCE110_LINK_ENC(*enc));
+ *enc = NULL;
+}
+
+void dce110_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ switch (signal) {
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ /* DP SST */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0);
+ break;
+ case SIGNAL_TYPE_LVDS:
+ /* LVDS */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1);
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ /* TMDS-DVI */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2);
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* TMDS-HDMI */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* DP MST */
+ REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ /* invalid mode ! */
+ break;
+ }
+
+}
+
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dce110_link_encoder_enable_tmds_output(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = enc->preferred_engine;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ if (hdmi) {
+ cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ cntl.lanes_number = 4;
+ } else if (dual_link) {
+ cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ cntl.lanes_number = 8;
+ } else {
+ cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ cntl.lanes_number = 4;
+ }
+ cntl.hpd_sel = enc110->base.hpd_source;
+
+ cntl.pixel_clock = pixel_clock;
+ cntl.color_depth = color_depth;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+/* enables DP PHY output */
+void dce110_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ /* number_of_lanes is used for pixel clock adjust,
+ * but it's not passed to asic_control.
+ * We need to set number of lanes manually.
+ */
+ configure_encoder(enc110, link_settings);
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = enc->preferred_engine;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ;
+ /* TODO: check if undefined works */
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+/* enables DP PHY output in MST mode */
+void dce110_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Enable the PHY */
+
+ /* number_of_lanes is used for pixel clock adjust,
+ * but it's not passed to asic_control.
+ * We need to set number of lanes manually.
+ */
+ configure_encoder(enc110, link_settings);
+
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ;
+ /* TODO: check if undefined works */
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+/*
+ * @brief
+ * Disable transmitter and its encoder
+ */
+void dce110_link_encoder_disable_output(
+ struct link_encoder *enc,
+ enum signal_type signal,
+ struct dc_link *link)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ if (!is_dig_enabled(enc110)) {
+ /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
+ return;
+ }
+ if (enc110->base.connector.id == CONNECTOR_ID_EDP)
+ ctx->dc->hwss.edp_backlight_control(link, false);
+ /* Powering down RX and disabling the GPU PHY should be paired.
+ * Disabling the PHY without powering down RX may cause symbol lock
+ * loss, in which case we will get a DP sink interrupt. */
+
+ /* There is a case for DP active dongles where we want to disable
+ * the PHY but keep RX powered; for those we need to ignore the DP
+ * sink interrupt by checking the lane count that was set
+ * on the last do_enable_output(). */
+
+ /* disable transmitter */
+ cntl.action = TRANSMITTER_CONTROL_DISABLE;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.signal = signal;
+ cntl.connector_obj_id = enc110->base.connector;
+
+ result = link_transmitter_control(enc110, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* disable encoder */
+ if (dc_is_dp_signal(signal))
+ link_encoder_disable(enc110);
+
+ if (enc110->base.connector.id == CONNECTOR_ID_EDP) {
+ /* power down eDP panel */
+ /* TODO: Power control causes a regression; we should implement
+ * it properly. For now just comment it out.
+ *
+ * link_encoder_edp_wait_for_hpd_ready(
+ link_enc,
+ link_enc->connector,
+ false);
+
+ * link_encoder_edp_power_control(
+ link_enc, false); */
+ }
+}
+
+void dce110_link_encoder_dp_set_lane_settings(
+ struct link_encoder *enc,
+ const struct link_training_settings *link_settings)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ union dpcd_training_lane_set training_lane_set = { { 0 } };
+ int32_t lane = 0;
+ struct bp_transmitter_control cntl = { 0 };
+
+ if (!link_settings) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
+ cntl.transmitter = enc110->base.transmitter;
+ cntl.connector_obj_id = enc110->base.connector;
+ cntl.lanes_number = link_settings->link_settings.lane_count;
+ cntl.hpd_sel = enc110->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_settings.link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+
+ for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) {
+ /* translate lane settings */
+
+ training_lane_set.bits.VOLTAGE_SWING_SET =
+ link_settings->lane_settings[lane].VOLTAGE_SWING;
+ training_lane_set.bits.PRE_EMPHASIS_SET =
+ link_settings->lane_settings[lane].PRE_EMPHASIS;
+
+ /* post cursor 2 setting only applies to HBR2 link rate */
+ if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) {
+ /* this is passed to VBIOS
+ * to program post cursor 2 level */
+
+ training_lane_set.bits.POST_CURSOR2_SET =
+ link_settings->lane_settings[lane].POST_CURSOR2;
+ }
+
+ cntl.lane_select = lane;
+ cntl.lane_settings = training_lane_set.raw;
+
+ /* call VBIOS table to set voltage swing and pre-emphasis */
+ link_transmitter_control(enc110, &cntl);
+ }
+}
+
+/* set DP PHY test and training patterns */
+void dce110_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+
+ switch (param->dp_phy_pattern) {
+ case DP_TEST_PATTERN_TRAINING_PATTERN1:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN2:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN3:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2);
+ break;
+ case DP_TEST_PATTERN_TRAINING_PATTERN4:
+ dce110_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3);
+ break;
+ case DP_TEST_PATTERN_D102:
+ set_dp_phy_pattern_d102(enc110);
+ break;
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ set_dp_phy_pattern_symbol_error(enc110);
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ set_dp_phy_pattern_prbs7(enc110);
+ break;
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ set_dp_phy_pattern_80bit_custom(
+ enc110, param->custom_pattern);
+ break;
+ case DP_TEST_PATTERN_CP2520_1:
+ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 1);
+ break;
+ case DP_TEST_PATTERN_CP2520_2:
+ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 2);
+ break;
+ case DP_TEST_PATTERN_CP2520_3:
+ set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc110, 3);
+ break;
+ case DP_TEST_PATTERN_VIDEO_MODE: {
+ set_dp_phy_pattern_passthrough_mode(
+ enc110, param->dp_panel_mode);
+ break;
+ }
+
+ default:
+ /* invalid phy pattern */
+ ASSERT_CRITICAL(false);
+ break;
+ }
+}
+
+static void fill_stream_allocation_row_info(
+ const struct link_mst_stream_allocation *stream_allocation,
+ uint32_t *src,
+ uint32_t *slots)
+{
+ const struct stream_encoder *stream_enc = stream_allocation->stream_enc;
+
+ if (stream_enc) {
+ *src = stream_enc->id;
+ *slots = stream_allocation->slot_count;
+ } else {
+ *src = 0;
+ *slots = 0;
+ }
+}
+
+/* programs DP MST VC payload allocation */
+void dce110_link_encoder_update_mst_stream_allocation_table(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ uint32_t value0 = 0;
+ uint32_t value1 = 0;
+ uint32_t value2 = 0;
+ uint32_t slots = 0;
+ uint32_t src = 0;
+ uint32_t retries = 0;
+
+ /* For CZ there are only 3 pipes, so the virtual channel count goes up to 3. */
+
+ /* --- Set MSE Stream Attribute:
+ * set up the VC payload table on the Tx side and issue the
+ * allocation change trigger to commit the payload on both the
+ * Tx and Rx sides */
+
+ /* we should clean-up table each time */
+
+ if (table->stream_count >= 1) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[0],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT0,
+ DP_MSE_SAT_SRC0, src,
+ DP_MSE_SAT_SLOT_COUNT0, slots);
+
+ if (table->stream_count >= 2) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[1],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT0,
+ DP_MSE_SAT_SRC1, src,
+ DP_MSE_SAT_SLOT_COUNT1, slots);
+
+ if (table->stream_count >= 3) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[2],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT1,
+ DP_MSE_SAT_SRC2, src,
+ DP_MSE_SAT_SLOT_COUNT2, slots);
+
+ if (table->stream_count >= 4) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[3],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_MSE_SAT1,
+ DP_MSE_SAT_SRC3, src,
+ DP_MSE_SAT_SLOT_COUNT3, slots);
+
+ /* --- wait for the transaction to finish */
+
+ /* Send the allocation change trigger (ACT): this step first sends
+ * the ACT, then double-buffers the SAT into the hardware, making the
+ * new allocation active on the DP MST link. */
+
+ /* DP_MSE_SAT_UPDATE:
+ * 0 - No Action
+ * 1 - Update SAT with trigger
+ * 2 - Update SAT without trigger */
+
+ REG_UPDATE(DP_MSE_SAT_UPDATE,
+ DP_MSE_SAT_UPDATE, 1);
+
+ /* Wait for the update to complete (i.e. the DP_MSE_SAT_UPDATE field
+ * resets to 0), then wait for the transmission of at least 16 MTP
+ * headers on the immediate local link, i.e. until the read-only
+ * DP_MSE_16_MTP_KEEPOUT field resets to 0. A value of 1 indicates
+ * that DP MST mode is in the 16-MTP keepout region after a VC has
+ * been added. MST stream bandwidth (VC rate) can be configured only
+ * after this bit is cleared. */
+
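+ /* Poll every 10 us, giving up after DP_MST_UPDATE_MAX_RETRY (50)
+ * iterations, i.e. roughly 500 us. */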
+ do {
+ udelay(10);
+
+ value0 = REG_READ(DP_MSE_SAT_UPDATE);
+
+ REG_GET(DP_MSE_SAT_UPDATE,
+ DP_MSE_SAT_UPDATE, &value1);
+
+ REG_GET(DP_MSE_SAT_UPDATE,
+ DP_MSE_16_MTP_KEEPOUT, &value2);
+
+ /* both the SAT update and the 16-MTP keepout have completed */
+ if (!value1 && !value2)
+ break;
+ ++retries;
+ } while (retries < DP_MST_UPDATE_MAX_RETRY);
+}
+
+void dce110_link_encoder_connect_dig_be_to_fe(
+ struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ uint32_t field;
+
+ if (engine != ENGINE_ID_UNKNOWN) {
+
+ REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field);
+
+ if (connect)
+ field |= get_frontend_source(engine);
+ else
+ field &= ~get_frontend_source(engine);
+
+ REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field);
+ }
+}
+
+void dce110_link_encoder_enable_hpd(struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ uint32_t addr = HPD_REG(DC_HPD_CONTROL);
+ uint32_t hpd_enable = 0;
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ hpd_enable = get_reg_field_value(value, DC_HPD_CONTROL, DC_HPD_EN);
+
+ if (hpd_enable == 0) {
+ set_reg_field_value(value, 1, DC_HPD_CONTROL, DC_HPD_EN);
+ dm_write_reg(ctx, addr, value);
+ }
+}
+
+void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ struct dc_context *ctx = enc110->base.ctx;
+ uint32_t addr = HPD_REG(DC_HPD_CONTROL);
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
+ dm_write_reg(ctx, addr, value);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
new file mode 100644
index 000000000000..494067dedd03
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCE110_H__
+#define __DC_LINK_ENCODER__DCE110_H__
+
+#include "link_encoder.h"
+
+#define TO_DCE110_LINK_ENC(link_encoder)\
+ container_of(link_encoder, struct dce110_link_encoder, base)
+
+/* Registers not found in the dce120 spec:
+ * BIOS_SCRATCH_2
+ * DP_DPHY_INTERNAL_CTRL
+ */
+
+#define AUX_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id)
+
+#define HPD_REG_LIST(id)\
+ SRI(DC_HPD_CONTROL, HPD, id)
+
+#define LE_COMMON_REG_LIST_BASE(id) \
+ SR(DMCU_RAM_ACCESS_CTRL), \
+ SR(DMCU_IRAM_RD_CTRL), \
+ SR(DMCU_IRAM_RD_DATA), \
+ SR(DMCU_INTERRUPT_TO_UC_EN_MASK), \
+ SRI(DIG_BE_CNTL, DIG, id), \
+ SRI(DIG_BE_EN_CNTL, DIG, id), \
+ SRI(DP_CONFIG, DP, id), \
+ SRI(DP_DPHY_CNTL, DP, id), \
+ SRI(DP_DPHY_PRBS_CNTL, DP, id), \
+ SRI(DP_DPHY_SCRAM_CNTL, DP, id),\
+ SRI(DP_DPHY_SYM0, DP, id), \
+ SRI(DP_DPHY_SYM1, DP, id), \
+ SRI(DP_DPHY_SYM2, DP, id), \
+ SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \
+ SRI(DP_LINK_CNTL, DP, id), \
+ SRI(DP_LINK_FRAMING_CNTL, DP, id), \
+ SRI(DP_MSE_SAT0, DP, id), \
+ SRI(DP_MSE_SAT1, DP, id), \
+ SRI(DP_MSE_SAT2, DP, id), \
+ SRI(DP_MSE_SAT_UPDATE, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_DPHY_FAST_TRAINING, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id)
+
+#define LE_COMMON_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE80_REG_LIST(id)\
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ LE_COMMON_REG_LIST_BASE(id)
+
+#define LE_DCE100_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE110_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCE120_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id), \
+ SR(DCI_MEM_PWR_STATUS)
+
+#define LE_DCN10_REG_LIST(id)\
+ LE_COMMON_REG_LIST_BASE(id), \
+ SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
+ SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
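+
+/* Each LE_*_REG_LIST variant above starts from LE_COMMON_REG_LIST_BASE
+ * and adds only the registers that exist on that DCE/DCN generation.
+ */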
+
+struct dce110_link_enc_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_DPHY_RX_CONTROL0;
+};
+
+struct dce110_link_enc_hpd_registers {
+ uint32_t DC_HPD_CONTROL;
+};
+
+struct dce110_link_enc_registers {
+ /* DMCU registers */
+ uint32_t MASTER_COMM_DATA_REG1;
+ uint32_t MASTER_COMM_DATA_REG2;
+ uint32_t MASTER_COMM_DATA_REG3;
+ uint32_t MASTER_COMM_CMD_REG;
+ uint32_t MASTER_COMM_CNTL_REG;
+ uint32_t DMCU_RAM_ACCESS_CTRL;
+ uint32_t DCI_MEM_PWR_STATUS;
+ uint32_t DMU_MEM_PWR_CNTL;
+ uint32_t DMCU_IRAM_RD_CTRL;
+ uint32_t DMCU_IRAM_RD_DATA;
+ uint32_t DMCU_INTERRUPT_TO_UC_EN_MASK;
+
+ /* Common DP registers */
+ uint32_t DIG_BE_CNTL;
+ uint32_t DIG_BE_EN_CNTL;
+ uint32_t DP_CONFIG;
+ uint32_t DP_DPHY_CNTL;
+ uint32_t DP_DPHY_INTERNAL_CTRL;
+ uint32_t DP_DPHY_PRBS_CNTL;
+ uint32_t DP_DPHY_SCRAM_CNTL;
+ uint32_t DP_DPHY_SYM0;
+ uint32_t DP_DPHY_SYM1;
+ uint32_t DP_DPHY_SYM2;
+ uint32_t DP_DPHY_TRAINING_PATTERN_SEL;
+ uint32_t DP_LINK_CNTL;
+ uint32_t DP_LINK_FRAMING_CNTL;
+ uint32_t DP_MSE_SAT0;
+ uint32_t DP_MSE_SAT1;
+ uint32_t DP_MSE_SAT2;
+ uint32_t DP_MSE_SAT_UPDATE;
+ uint32_t DP_SEC_CNTL;
+ uint32_t DP_VID_STREAM_CNTL;
+ uint32_t DP_DPHY_FAST_TRAINING;
+ uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
+ uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
+ uint32_t DP_SEC_CNTL1;
+};
+
+struct dce110_link_encoder {
+ struct link_encoder base;
+ const struct dce110_link_enc_registers *link_regs;
+ const struct dce110_link_enc_aux_registers *aux_regs;
+ const struct dce110_link_enc_hpd_registers *hpd_regs;
+};
+
+
+void dce110_link_encoder_construct(
+ struct dce110_link_encoder *enc110,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dce110_link_enc_registers *link_regs,
+ const struct dce110_link_enc_aux_registers *aux_regs,
+ const struct dce110_link_enc_hpd_registers *hpd_regs);
+
+bool dce110_link_encoder_validate_dvi_output(
+ const struct dce110_link_encoder *enc110,
+ enum signal_type connector_signal,
+ enum signal_type signal,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_rgb_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_dp_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_wireless_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing);
+
+bool dce110_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream);
+
+/****************** HW programming ************************/
+
+/* initialize HW */ /* why do we initialize aux in here? */
+void dce110_link_encoder_hw_init(struct link_encoder *enc);
+
+void dce110_link_encoder_destroy(struct link_encoder **enc);
+
+/* program DIG_MODE in DIG_BE */
+/* TODO can this be combined with enable_output? */
+void dce110_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal);
+
+/* enables TMDS PHY output */
+/* TODO: still need depth or just pass in adjusted pixel clock? */
+void dce110_link_encoder_enable_tmds_output(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock);
+
+/* enables DP PHY output */
+void dce110_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/* enables DP PHY output in MST mode */
+void dce110_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+
+/* disable PHY output */
+void dce110_link_encoder_disable_output(
+ struct link_encoder *link_enc,
+ enum signal_type signal,
+ struct dc_link *link);
+
+/* set DP lane settings */
+void dce110_link_encoder_dp_set_lane_settings(
+ struct link_encoder *enc,
+ const struct link_training_settings *link_settings);
+
+void dce110_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param);
+
+/* programs DP MST VC payload allocation */
+void dce110_link_encoder_update_mst_stream_allocation_table(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+
+void dce110_link_encoder_connect_dig_be_to_fe(
+ struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect);
+
+void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
+ struct link_encoder *enc,
+ uint32_t index);
+
+void dce110_link_encoder_enable_hpd(struct link_encoder *enc);
+
+void dce110_link_encoder_disable_hpd(struct link_encoder *enc);
+
+void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc,
+ bool exit_link_training_required);
+
+void dce110_psr_program_secondary_packet(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline);
+
+#endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
new file mode 100644
index 000000000000..0790f25c7b3b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -0,0 +1,700 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_mem_input.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+
+#define CTX \
+ dce_mi->base.ctx
+#define REG(reg)\
+ dce_mi->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_mi->shifts->field_name, dce_mi->masks->field_name
+
+struct pte_setting {
+ unsigned int bpp;
+ unsigned int page_width;
+ unsigned int page_height;
+ unsigned char min_pte_before_flip_horiz_scan;
+ unsigned char min_pte_before_flip_vert_scan;
+ unsigned char pte_req_per_chunk;
+ unsigned char param_6;
+ unsigned char param_7;
+ unsigned char param_8;
+};
+
+enum mi_bits_per_pixel {
+ mi_bpp_8 = 0,
+ mi_bpp_16,
+ mi_bpp_32,
+ mi_bpp_64,
+ mi_bpp_count,
+};
+
+enum mi_tiling_format {
+ mi_tiling_linear = 0,
+ mi_tiling_1D,
+ mi_tiling_2D,
+ mi_tiling_count,
+};
+
+static const struct pte_setting pte_settings[mi_tiling_count][mi_bpp_count] = {
+ [mi_tiling_linear] = {
+ { 8, 4096, 1, 8, 0, 1, 0, 0, 0},
+ { 16, 2048, 1, 8, 0, 1, 0, 0, 0},
+ { 32, 1024, 1, 8, 0, 1, 0, 0, 0},
+ { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */
+ },
+ [mi_tiling_1D] = {
+ { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */
+ { 16, 256, 8, 2, 0, 1, 0, 0, 0},
+ { 32, 128, 8, 4, 0, 1, 0, 0, 0},
+ { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */
+ },
+ [mi_tiling_2D] = {
+ { 8, 64, 64, 8, 8, 1, 4, 0, 0},
+ { 16, 64, 32, 8, 16, 1, 8, 0, 0},
+ { 32, 32, 32, 16, 16, 1, 8, 0, 0},
+ { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */
+ },
+};
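+
+/* pte_settings is indexed by [tiling format][bits per pixel]; each entry lists
+ * {bpp, page_width, page_height, min_pte_before_flip (horiz scan),
+ *  min_pte_before_flip (vert scan), pte_req_per_chunk, param_6..param_8}.
+ */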
+
+static enum mi_bits_per_pixel get_mi_bpp(
+ enum surface_pixel_format format)
+{
+ if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616)
+ return mi_bpp_64;
+ else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888)
+ return mi_bpp_32;
+ else if (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB1555)
+ return mi_bpp_16;
+ else
+ return mi_bpp_8;
+}
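+
+/* get_mi_bpp() relies on the SURFACE_PIXEL_FORMAT_GRPH_* enum values being
+ * ordered by increasing bits per pixel, so the >= comparisons above pick
+ * the right bpp class for the format.
+ */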
+
+static enum mi_tiling_format get_mi_tiling(
+ union dc_tiling_info *tiling_info)
+{
+ switch (tiling_info->gfx8.array_mode) {
+ case DC_ARRAY_1D_TILED_THIN1:
+ case DC_ARRAY_1D_TILED_THICK:
+ case DC_ARRAY_PRT_TILED_THIN1:
+ return mi_tiling_1D;
+ case DC_ARRAY_2D_TILED_THIN1:
+ case DC_ARRAY_2D_TILED_THICK:
+ case DC_ARRAY_2D_TILED_X_THICK:
+ case DC_ARRAY_PRT_2D_TILED_THIN1:
+ case DC_ARRAY_PRT_2D_TILED_THICK:
+ return mi_tiling_2D;
+ case DC_ARRAY_LINEAR_GENERAL:
+ case DC_ARRAY_LINEAR_ALLIGNED:
+ return mi_tiling_linear;
+ default:
+ return mi_tiling_2D;
+ }
+}
+
+static bool is_vert_scan(enum dc_rotation_angle rotation)
+{
+ switch (rotation) {
+ case ROTATION_ANGLE_90:
+ case ROTATION_ANGLE_270:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void dce_mi_program_pte_vm(
+ struct mem_input *mi,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ enum mi_bits_per_pixel mi_bpp = get_mi_bpp(format);
+ enum mi_tiling_format mi_tiling = get_mi_tiling(tiling_info);
+ const struct pte_setting *pte = &pte_settings[mi_tiling][mi_bpp];
+
+ unsigned int page_width = log_2(pte->page_width);
+ unsigned int page_height = log_2(pte->page_height);
+ unsigned int min_pte_before_flip = is_vert_scan(rotation) ?
+ pte->min_pte_before_flip_vert_scan :
+ pte->min_pte_before_flip_horiz_scan;
+
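+ /* DVMM page width/height are programmed as log2 of the PTE page size,
+ * and the flip threshold depends on whether the scan direction is
+ * vertical (90/270 degree rotation) or horizontal.
+ */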
+ REG_UPDATE(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT,
+ GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, 0xff);
+
+ REG_UPDATE_3(DVMM_PTE_CONTROL,
+ DVMM_PAGE_WIDTH, page_width,
+ DVMM_PAGE_HEIGHT, page_height,
+ DVMM_MIN_PTE_BEFORE_FLIP, min_pte_before_flip);
+
+ REG_UPDATE_2(DVMM_PTE_ARB_CONTROL,
+ DVMM_PTE_REQ_PER_CHUNK, pte->pte_req_per_chunk,
+ DVMM_MAX_PTE_REQ_OUTSTANDING, 0xff);
+}
+
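+/* The watermark helpers below first select which watermark set (A-D) is
+ * being programmed via DPG_WATERMARK_MASK_CONTROL, then write the actual
+ * mark into the corresponding pipe control register.
+ */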
+static void program_urgency_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t urgency_low_wm,
+ uint32_t urgency_high_wm)
+{
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK, wm_select);
+
+ REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0,
+ URGENCY_LOW_WATERMARK, urgency_low_wm,
+ URGENCY_HIGH_WATERMARK, urgency_high_wm);
+}
+
+static void program_nbp_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t nbp_wm)
+{
+ if (REG(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL)) {
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK, wm_select);
+
+ REG_UPDATE_3(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE, 1,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, 1,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1);
+
+ REG_UPDATE(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK, nbp_wm);
+ }
+
+ if (REG(DPG_PIPE_LOW_POWER_CONTROL)) {
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ PSTATE_CHANGE_WATERMARK_MASK, wm_select);
+
+ REG_UPDATE_3(DPG_PIPE_LOW_POWER_CONTROL,
+ PSTATE_CHANGE_ENABLE, 1,
+ PSTATE_CHANGE_URGENT_DURING_REQUEST, 1,
+ PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, 1);
+
+ REG_UPDATE(DPG_PIPE_LOW_POWER_CONTROL,
+ PSTATE_CHANGE_WATERMARK, nbp_wm);
+ }
+}
+
+static void program_stutter_watermark(
+ struct dce_mem_input *dce_mi,
+ uint32_t wm_select,
+ uint32_t stutter_mark)
+{
+ REG_UPDATE(DPG_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select);
+
+ if (REG(DPG_PIPE_STUTTER_CONTROL2))
+ REG_UPDATE(DPG_PIPE_STUTTER_CONTROL2,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+ else
+ REG_UPDATE(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark);
+}
+
+static void dce_mi_program_display_marks(
+ struct mem_input *mi,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+ program_urgency_watermark(dce_mi, 2, /* set a */
+ urgent.a_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 1, /* set d */
+ urgent.d_mark, total_dest_line_time_ns);
+
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE, stutter_en,
+ STUTTER_IGNORE_FBC, 1);
+ program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */
+ program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */
+
+ program_stutter_watermark(dce_mi, 2, stutter.a_mark); /* set a */
+ program_stutter_watermark(dce_mi, 1, stutter.d_mark); /* set d */
+}
+
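+/*
+ * Variant for parts that expose four watermark sets (a-d); hooked up by
+ * dce112_mem_input_construct() below.
+ */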
+static void dce120_mi_program_display_marks(struct mem_input *mi,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1;
+
+ program_urgency_watermark(dce_mi, 0, /* set a */
+ urgent.a_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 1, /* set b */
+ urgent.b_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 2, /* set c */
+ urgent.c_mark, total_dest_line_time_ns);
+ program_urgency_watermark(dce_mi, 3, /* set d */
+ urgent.d_mark, total_dest_line_time_ns);
+
+ REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE, stutter_en,
+ STUTTER_IGNORE_FBC, 1);
+ program_nbp_watermark(dce_mi, 0, nbp.a_mark); /* set a */
+ program_nbp_watermark(dce_mi, 1, nbp.b_mark); /* set b */
+ program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */
+ program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */
+
+ program_stutter_watermark(dce_mi, 0, stutter.a_mark); /* set a */
+ program_stutter_watermark(dce_mi, 1, stutter.b_mark); /* set b */
+ program_stutter_watermark(dce_mi, 2, stutter.c_mark); /* set c */
+ program_stutter_watermark(dce_mi, 3, stutter.d_mark); /* set d */
+}
+
+static void program_tiling(
+ struct dce_mem_input *dce_mi, const union dc_tiling_info *info)
+{
+ if (dce_mi->masks->GRPH_SW_MODE) { /* GFX9 */
+ REG_UPDATE_6(GRPH_CONTROL,
+ GRPH_SW_MODE, info->gfx9.swizzle,
+ GRPH_NUM_BANKS, log_2(info->gfx9.num_banks),
+ GRPH_NUM_SHADER_ENGINES, log_2(info->gfx9.num_shader_engines),
+ GRPH_NUM_PIPES, log_2(info->gfx9.num_pipes),
+ GRPH_COLOR_EXPANSION_MODE, 1,
+ GRPH_SE_ENABLE, info->gfx9.shaderEnable);
+ /* TODO: DCP0_GRPH_CONTROL__GRPH_SE_ENABLE where to get info
+ GRPH_SE_ENABLE, 1,
+ GRPH_Z, 0);
+ */
+ }
+
+ if (dce_mi->masks->GRPH_ARRAY_MODE) { /* GFX8 */
+ REG_UPDATE_9(GRPH_CONTROL,
+ GRPH_NUM_BANKS, info->gfx8.num_banks,
+ GRPH_BANK_WIDTH, info->gfx8.bank_width,
+ GRPH_BANK_HEIGHT, info->gfx8.bank_height,
+ GRPH_MACRO_TILE_ASPECT, info->gfx8.tile_aspect,
+ GRPH_TILE_SPLIT, info->gfx8.tile_split,
+ GRPH_MICRO_TILE_MODE, info->gfx8.tile_mode,
+ GRPH_PIPE_CONFIG, info->gfx8.pipe_config,
+ GRPH_ARRAY_MODE, info->gfx8.array_mode,
+ GRPH_COLOR_EXPANSION_MODE, 1);
+ /* 01 - DCP_GRPH_COLOR_EXPANSION_MODE_ZEXP: zero expansion for YCbCr */
+ /*
+ GRPH_Z, 0);
+ */
+ }
+}
+
+
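+/* Surface extents are swapped for 90/270 rotation before being programmed. */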
+static void program_size_and_rotation(
+ struct dce_mem_input *dce_mi,
+ enum dc_rotation_angle rotation,
+ const union plane_size *plane_size)
+{
+ const struct rect *in_rect = &plane_size->grph.surface_size;
+ struct rect hw_rect = plane_size->grph.surface_size;
+ const uint32_t rotation_angles[ROTATION_ANGLE_COUNT] = {
+ [ROTATION_ANGLE_0] = 0,
+ [ROTATION_ANGLE_90] = 1,
+ [ROTATION_ANGLE_180] = 2,
+ [ROTATION_ANGLE_270] = 3,
+ };
+
+ if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) {
+ hw_rect.x = in_rect->y;
+ hw_rect.y = in_rect->x;
+
+ hw_rect.height = in_rect->width;
+ hw_rect.width = in_rect->height;
+ }
+
+ REG_SET(GRPH_X_START, 0,
+ GRPH_X_START, hw_rect.x);
+
+ REG_SET(GRPH_Y_START, 0,
+ GRPH_Y_START, hw_rect.y);
+
+ REG_SET(GRPH_X_END, 0,
+ GRPH_X_END, hw_rect.width);
+
+ REG_SET(GRPH_Y_END, 0,
+ GRPH_Y_END, hw_rect.height);
+
+ REG_SET(GRPH_PITCH, 0,
+ GRPH_PITCH, plane_size->grph.surface_pitch);
+
+ REG_SET(HW_ROTATION, 0,
+ GRPH_ROTATION_ANGLE, rotation_angles[rotation]);
+}
+
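+/*
+ * Select the red/blue crossbar swap for ABGR layouts and map the surface
+ * format onto the GRPH_DEPTH/GRPH_FORMAT encoding; the ABGR FP16 case also
+ * enables the signed, floating-point prescale path.
+ */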
+static void program_grph_pixel_format(
+ struct dce_mem_input *dce_mi,
+ enum surface_pixel_format format)
+{
+ uint32_t red_xbar = 0, blue_xbar = 0; /* no swap */
+ uint32_t grph_depth = 0, grph_format = 0;
+ uint32_t sign = 0, floating = 0;
+
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888 ||
+ /*todo: doesn't look like we handle BGRA here,
+ * should probably swap endian*/
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ /* ABGR formats */
+ red_xbar = 2;
+ blue_xbar = 2;
+ }
+
+ REG_SET_2(GRPH_SWAP_CNTL, 0,
+ GRPH_RED_CROSSBAR, red_xbar,
+ GRPH_BLUE_CROSSBAR, blue_xbar);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ grph_depth = 0;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ grph_depth = 1;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ grph_depth = 1;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ grph_depth = 2;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ grph_depth = 2;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ sign = 1;
+ floating = 1;
+ /* fall through */
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ grph_depth = 3;
+ grph_format = 0;
+ break;
+ default:
+ DC_ERR("unsupported grph pixel format");
+ break;
+ }
+
+ REG_UPDATE_2(GRPH_CONTROL,
+ GRPH_DEPTH, grph_depth,
+ GRPH_FORMAT, grph_format);
+
+ REG_UPDATE_4(PRESCALE_GRPH_CONTROL,
+ GRPH_PRESCALE_SELECT, floating,
+ GRPH_PRESCALE_R_SIGN, sign,
+ GRPH_PRESCALE_G_SIGN, sign,
+ GRPH_PRESCALE_B_SIGN, sign);
+}
+
+static void dce_mi_program_surface_config(
+ struct mem_input *mi,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1);
+
+ program_tiling(dce_mi, tiling_info);
+ program_size_and_rotation(dce_mi, rotation, plane_size);
+
+ if (format >= SURFACE_PIXEL_FORMAT_GRPH_BEGIN &&
+ format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ program_grph_pixel_format(dce_mi, format);
+}
+
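+/*
+ * Return roughly twice the frame time in microseconds (no less than 2 x 30ms),
+ * used to bound the wait for the DMIF buffer reallocation below.
+ */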
+static uint32_t get_dmif_switch_time_us(
+ uint32_t h_total,
+ uint32_t v_total,
+ uint32_t pix_clk_khz)
+{
+ uint32_t frame_time;
+ uint32_t pixels_per_second;
+ uint32_t pixels_per_frame;
+ uint32_t refresh_rate;
+ const uint32_t us_in_sec = 1000000;
+ const uint32_t min_single_frame_time_us = 30000;
+ /* return double the frame time */
+ const uint32_t single_frame_time_multiplier = 2;
+
+ if (!h_total || !v_total || !pix_clk_khz)
+ return single_frame_time_multiplier * min_single_frame_time_us;
+
+ /*TODO: should we use pixel format normalized pixel clock here?*/
+ pixels_per_second = pix_clk_khz * 1000;
+ pixels_per_frame = h_total * v_total;
+
+ if (!pixels_per_second || !pixels_per_frame) {
+ /* avoid division by zero */
+ ASSERT(pixels_per_frame);
+ ASSERT(pixels_per_second);
+ return single_frame_time_multiplier * min_single_frame_time_us;
+ }
+
+ refresh_rate = pixels_per_second / pixels_per_frame;
+
+ if (!refresh_rate) {
+ /* avoid division by zero*/
+ ASSERT(refresh_rate);
+ return single_frame_time_multiplier * min_single_frame_time_us;
+ }
+
+ frame_time = us_in_sec / refresh_rate;
+
+ if (frame_time < min_single_frame_time_us)
+ frame_time = min_single_frame_time_us;
+
+ frame_time *= single_frame_time_multiplier;
+
+ return frame_time;
+}
+
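+/*
+ * Allocate two DMIF buffers to this pipe, wait for the hardware to report
+ * the allocation complete, then program the pixel duration used by memory
+ * arbitration. The single-head read-request limit workaround is only left
+ * enabled while a single stream is active.
+ */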
+static void dce_mi_allocate_dmif(
+ struct mem_input *mi,
+ uint32_t h_total,
+ uint32_t v_total,
+ uint32_t pix_clk_khz,
+ uint32_t total_stream_num)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ const uint32_t retry_delay = 10;
+ uint32_t retry_count = get_dmif_switch_time_us(
+ h_total,
+ v_total,
+ pix_clk_khz) / retry_delay;
+
+ uint32_t pix_dur;
+ uint32_t buffers_allocated;
+ uint32_t dmif_buffer_control;
+
+ dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
+
+ if (buffers_allocated == 2)
+ return;
+
+ REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
+ DMIF_BUFFERS_ALLOCATED, 2);
+
+ REG_WAIT(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
+ retry_delay, retry_count);
+
+ if (pix_clk_khz != 0) {
+ pix_dur = 1000000000ULL / pix_clk_khz;
+
+ REG_UPDATE(DPG_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION, pix_dur);
+ }
+
+ if (dce_mi->wa.single_head_rdreq_dmif_limit) {
+ uint32_t enable = (total_stream_num > 1) ? 0 :
+ dce_mi->wa.single_head_rdreq_dmif_limit;
+
+ REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
+ ENABLE, enable);
+ }
+}
+
+static void dce_mi_free_dmif(
+ struct mem_input *mi,
+ uint32_t total_stream_num)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi);
+ uint32_t buffers_allocated;
+ uint32_t dmif_buffer_control;
+
+ dmif_buffer_control = REG_GET(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATED, &buffers_allocated);
+
+ if (buffers_allocated == 0)
+ return;
+
+ REG_SET(DMIF_BUFFER_CONTROL, dmif_buffer_control,
+ DMIF_BUFFERS_ALLOCATED, 0);
+
+ REG_WAIT(DMIF_BUFFER_CONTROL,
+ DMIF_BUFFERS_ALLOCATION_COMPLETED, 1,
+ 10, 3500);
+
+ if (dce_mi->wa.single_head_rdreq_dmif_limit) {
+ uint32_t enable = (total_stream_num > 1) ? 0 :
+ dce_mi->wa.single_head_rdreq_dmif_limit;
+
+ REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT,
+ ENABLE, enable);
+ }
+}
+
+
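+/*
+ * Surface addresses are split across a high and a low register; the low
+ * register takes the address shifted right by 8, so the bottom 8 bits are
+ * assumed to be zero (256-byte aligned surfaces).
+ */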
+static void program_sec_addr(
+ struct dce_mem_input *dce_mi,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ /*high register MUST be programmed first*/
+ REG_SET(GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
+ address.high_part);
+
+ REG_SET_2(GRPH_SECONDARY_SURFACE_ADDRESS, 0,
+ GRPH_SECONDARY_SURFACE_ADDRESS, address.low_part >> 8,
+ GRPH_SECONDARY_DFQ_ENABLE, 0);
+}
+
+static void program_pri_addr(
+ struct dce_mem_input *dce_mi,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ /*high register MUST be programmed first*/
+ REG_SET(GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
+ address.high_part);
+
+ REG_SET(GRPH_PRIMARY_SURFACE_ADDRESS, 0,
+ GRPH_PRIMARY_SURFACE_ADDRESS,
+ address.low_part >> 8);
+}
+
+
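+/*
+ * A flip is pending while the hardware still reports a surface update in
+ * flight; once it completes, the requested address becomes the current one.
+ */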
+static bool dce_mi_is_flip_pending(struct mem_input *mem_input)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mem_input);
+ uint32_t update_pending;
+
+ REG_GET(GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING, &update_pending);
+ if (update_pending)
+ return true;
+
+ mem_input->current_address = mem_input->request_address;
+ return false;
+}
+
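+/*
+ * Program the new surface address(es) under GRPH_UPDATE_LOCK so primary and
+ * secondary (stereo) addresses are latched as one update;
+ * GRPH_SURFACE_UPDATE_H_RETRACE_EN is set when an immediate flip is requested.
+ */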
+static bool dce_mi_program_surface_flip_and_addr(
+ struct mem_input *mem_input,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mem_input);
+
+ REG_UPDATE(GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
+
+ REG_UPDATE(
+ GRPH_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_H_RETRACE_EN, flip_immediate ? 1 : 0);
+
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ if (address->grph.addr.quad_part == 0)
+ break;
+ program_pri_addr(dce_mi, address->grph.addr);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0 ||
+ address->grph_stereo.right_addr.quad_part == 0)
+ break;
+ program_pri_addr(dce_mi, address->grph_stereo.left_addr);
+ program_sec_addr(dce_mi, address->grph_stereo.right_addr);
+ break;
+ default:
+ /* not supported */
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ mem_input->request_address = *address;
+
+ if (flip_immediate)
+ mem_input->current_address = *address;
+
+ REG_UPDATE(GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
+
+ return true;
+}
+
+static struct mem_input_funcs dce_mi_funcs = {
+ .mem_input_program_display_marks = dce_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+
+void dce_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask)
+{
+ dce_mi->base.ctx = ctx;
+
+ dce_mi->base.inst = inst;
+ dce_mi->base.funcs = &dce_mi_funcs;
+
+ dce_mi->regs = regs;
+ dce_mi->shifts = mi_shift;
+ dce_mi->masks = mi_mask;
+}
+
+void dce112_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask)
+{
+ dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
+ dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
new file mode 100644
index 000000000000..05d39c0cbe87
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DCE_MEM_INPUT_H__
+#define __DCE_MEM_INPUT_H__
+
+#include "dc_hw_types.h"
+#include "mem_input.h"
+
+#define TO_DCE_MEM_INPUT(mem_input)\
+ container_of(mem_input, struct dce_mem_input, base)
+
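+/*
+ * The register list macros below expand, via the SRI/SR helpers defined by
+ * each resource file that includes this header, into initializers for
+ * struct dce_mem_input_registers.
+ */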
+#define MI_DCE_BASE_REG_LIST(id)\
+ SRI(GRPH_ENABLE, DCP, id),\
+ SRI(GRPH_CONTROL, DCP, id),\
+ SRI(GRPH_X_START, DCP, id),\
+ SRI(GRPH_Y_START, DCP, id),\
+ SRI(GRPH_X_END, DCP, id),\
+ SRI(GRPH_Y_END, DCP, id),\
+ SRI(GRPH_PITCH, DCP, id),\
+ SRI(HW_ROTATION, DCP, id),\
+ SRI(GRPH_SWAP_CNTL, DCP, id),\
+ SRI(PRESCALE_GRPH_CONTROL, DCP, id),\
+ SRI(GRPH_UPDATE, DCP, id),\
+ SRI(GRPH_FLIP_CONTROL, DCP, id),\
+ SRI(GRPH_PRIMARY_SURFACE_ADDRESS, DCP, id),\
+ SRI(GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, DCP, id),\
+ SRI(GRPH_SECONDARY_SURFACE_ADDRESS, DCP, id),\
+ SRI(GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, DCP, id),\
+ SRI(DPG_PIPE_ARBITRATION_CONTROL1, DMIF_PG, id),\
+ SRI(DPG_WATERMARK_MASK_CONTROL, DMIF_PG, id),\
+ SRI(DPG_PIPE_URGENCY_CONTROL, DMIF_PG, id),\
+ SRI(DPG_PIPE_STUTTER_CONTROL, DMIF_PG, id),\
+ SRI(DMIF_BUFFER_CONTROL, PIPE, id)
+
+#define MI_DCE_PTE_REG_LIST(id)\
+ SRI(DVMM_PTE_CONTROL, DCP, id),\
+ SRI(DVMM_PTE_ARB_CONTROL, DCP, id)
+
+#define MI_DCE8_REG_LIST(id)\
+ MI_DCE_BASE_REG_LIST(id),\
+ SRI(DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, DMIF_PG, id)
+
+#define MI_DCE11_2_REG_LIST(id)\
+ MI_DCE8_REG_LIST(id),\
+ SRI(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, DCP, id)
+
+#define MI_DCE11_REG_LIST(id)\
+ MI_DCE11_2_REG_LIST(id),\
+ MI_DCE_PTE_REG_LIST(id)
+
+#define MI_DCE12_REG_LIST(id)\
+ MI_DCE_BASE_REG_LIST(id),\
+ MI_DCE_PTE_REG_LIST(id),\
+ SRI(GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, DCP, id),\
+ SRI(DPG_PIPE_STUTTER_CONTROL2, DMIF_PG, id),\
+ SRI(DPG_PIPE_LOW_POWER_CONTROL, DMIF_PG, id),\
+ SR(DCHUB_FB_LOCATION),\
+ SR(DCHUB_AGP_BASE),\
+ SR(DCHUB_AGP_BOT),\
+ SR(DCHUB_AGP_TOP)
+
+struct dce_mem_input_registers {
+ /* DCP */
+ uint32_t GRPH_ENABLE;
+ uint32_t GRPH_CONTROL;
+ uint32_t GRPH_X_START;
+ uint32_t GRPH_Y_START;
+ uint32_t GRPH_X_END;
+ uint32_t GRPH_Y_END;
+ uint32_t GRPH_PITCH;
+ uint32_t HW_ROTATION;
+ uint32_t GRPH_SWAP_CNTL;
+ uint32_t PRESCALE_GRPH_CONTROL;
+ uint32_t GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT;
+ uint32_t DVMM_PTE_CONTROL;
+ uint32_t DVMM_PTE_ARB_CONTROL;
+ uint32_t GRPH_UPDATE;
+ uint32_t GRPH_FLIP_CONTROL;
+ uint32_t GRPH_PRIMARY_SURFACE_ADDRESS;
+ uint32_t GRPH_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t GRPH_SECONDARY_SURFACE_ADDRESS;
+ uint32_t GRPH_SECONDARY_SURFACE_ADDRESS_HIGH;
+ /* DMIF_PG */
+ uint32_t DPG_PIPE_ARBITRATION_CONTROL1;
+ uint32_t DPG_WATERMARK_MASK_CONTROL;
+ uint32_t DPG_PIPE_URGENCY_CONTROL;
+ uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL;
+ uint32_t DPG_PIPE_LOW_POWER_CONTROL;
+ uint32_t DPG_PIPE_STUTTER_CONTROL;
+ uint32_t DPG_PIPE_STUTTER_CONTROL2;
+ /* DCI */
+ uint32_t DMIF_BUFFER_CONTROL;
+ /* MC_HUB */
+ uint32_t MC_HUB_RDREQ_DMIF_LIMIT;
+ /*DCHUB*/
+ uint32_t DCHUB_FB_LOCATION;
+ uint32_t DCHUB_AGP_BASE;
+ uint32_t DCHUB_AGP_BOT;
+ uint32_t DCHUB_AGP_TOP;
+};
+
+/* Set_Field_for_Block */
+#define SFB(blk_name, reg_name, field_name, post_fix)\
+ .field_name = blk_name ## reg_name ## __ ## field_name ## post_fix
+
+#define MI_GFX8_TILE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_BANK_WIDTH, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_BANK_HEIGHT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_TILE_SPLIT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_PIPE_CONFIG, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_ARRAY_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh)
+
+#define MI_DCP_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
+ SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
+ SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
+ SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
+ SFB(blk, GRPH_Y_END, GRPH_Y_END, mask_sh),\
+ SFB(blk, GRPH_PITCH, GRPH_PITCH, mask_sh),\
+ SFB(blk, HW_ROTATION, GRPH_ROTATION_ANGLE, mask_sh),\
+ SFB(blk, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, mask_sh),\
+ SFB(blk, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_SELECT, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_R_SIGN, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_G_SIGN, mask_sh),\
+ SFB(blk, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_B_SIGN, mask_sh),\
+ SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ SFB(blk, GRPH_SECONDARY_SURFACE_ADDRESS, GRPH_SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ SFB(blk, GRPH_PRIMARY_SURFACE_ADDRESS, GRPH_PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ SFB(blk, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING, mask_sh),\
+ SFB(blk, GRPH_UPDATE, GRPH_UPDATE_LOCK, mask_sh),\
+ SFB(blk, GRPH_FLIP_CONTROL, GRPH_SURFACE_UPDATE_H_RETRACE_EN, mask_sh)
+
+#define MI_DCP_DCE11_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT, mask_sh)
+
+#define MI_DCP_PTE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH, mask_sh),\
+ SFB(blk, DVMM_PTE_CONTROL, DVMM_PAGE_HEIGHT, mask_sh),\
+ SFB(blk, DVMM_PTE_CONTROL, DVMM_MIN_PTE_BEFORE_FLIP, mask_sh),\
+ SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK, mask_sh),\
+ SFB(blk, DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING, mask_sh)
+
+#define MI_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_ARBITRATION_CONTROL1, PIXEL_DURATION, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_IGNORE_FBC, mask_sh),\
+ SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, mask_sh),\
+ SF(PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED, mask_sh)
+
+#define MI_DMIF_PG_MASK_SH_DCE(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, NB_PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_NB_PSTATE_CHANGE_CONTROL, NB_PSTATE_CHANGE_WATERMARK, mask_sh)
+
+#define MI_DCE8_MASK_SH_LIST(mask_sh)\
+ MI_DCP_MASK_SH_LIST(mask_sh, ),\
+ MI_DMIF_PG_MASK_SH_LIST(mask_sh, ),\
+ MI_DMIF_PG_MASK_SH_DCE(mask_sh, ),\
+ MI_GFX8_TILE_MASK_SH_LIST(mask_sh, )
+
+#define MI_DCE11_2_MASK_SH_LIST(mask_sh)\
+ MI_DCE8_MASK_SH_LIST(mask_sh),\
+ MI_DCP_DCE11_MASK_SH_LIST(mask_sh, )
+
+#define MI_DCE11_MASK_SH_LIST(mask_sh)\
+ MI_DCE11_2_MASK_SH_LIST(mask_sh),\
+ MI_DCP_PTE_MASK_SH_LIST(mask_sh, )
+
+#define MI_GFX9_TILE_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, GRPH_CONTROL, GRPH_SW_MODE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_SE_ENABLE, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_SHADER_ENGINES, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_NUM_PIPES, mask_sh),\
+ SFB(blk, GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE, mask_sh)
+
+#define MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\
+ SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\
+ SFB(blk, DPG_WATERMARK_MASK_CONTROL, PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_ENABLE, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST, mask_sh),\
+ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_WATERMARK, mask_sh)
+
+#define MI_GFX9_DCHUB_MASK_SH_LIST(mask_sh)\
+ SF(DCHUB_FB_LOCATION, FB_TOP, mask_sh),\
+ SF(DCHUB_FB_LOCATION, FB_BASE, mask_sh),\
+ SF(DCHUB_AGP_BASE, AGP_BASE, mask_sh),\
+ SF(DCHUB_AGP_BOT, AGP_BOT, mask_sh),\
+ SF(DCHUB_AGP_TOP, AGP_TOP, mask_sh)
+
+#define MI_DCE12_MASK_SH_LIST(mask_sh)\
+ MI_DCP_MASK_SH_LIST(mask_sh, DCP0_),\
+ SF(DCP0_GRPH_SECONDARY_SURFACE_ADDRESS, GRPH_SECONDARY_DFQ_ENABLE, mask_sh),\
+ MI_DCP_DCE11_MASK_SH_LIST(mask_sh, DCP0_),\
+ MI_DCP_PTE_MASK_SH_LIST(mask_sh, DCP0_),\
+ MI_DMIF_PG_MASK_SH_LIST(mask_sh, DMIF_PG0_),\
+ MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, DMIF_PG0_),\
+ MI_GFX9_TILE_MASK_SH_LIST(mask_sh, DCP0_),\
+ MI_GFX9_DCHUB_MASK_SH_LIST(mask_sh)
+
+#define MI_REG_FIELD_LIST(type) \
+ type GRPH_ENABLE; \
+ type GRPH_X_START; \
+ type GRPH_Y_START; \
+ type GRPH_X_END; \
+ type GRPH_Y_END; \
+ type GRPH_PITCH; \
+ type GRPH_ROTATION_ANGLE; \
+ type GRPH_RED_CROSSBAR; \
+ type GRPH_BLUE_CROSSBAR; \
+ type GRPH_PRESCALE_SELECT; \
+ type GRPH_PRESCALE_R_SIGN; \
+ type GRPH_PRESCALE_G_SIGN; \
+ type GRPH_PRESCALE_B_SIGN; \
+ type GRPH_PIPE_OUTSTANDING_REQUEST_LIMIT; \
+ type DVMM_PAGE_WIDTH; \
+ type DVMM_PAGE_HEIGHT; \
+ type DVMM_MIN_PTE_BEFORE_FLIP; \
+ type DVMM_PTE_REQ_PER_CHUNK; \
+ type DVMM_MAX_PTE_REQ_OUTSTANDING; \
+ type GRPH_DEPTH; \
+ type GRPH_FORMAT; \
+ type GRPH_NUM_BANKS; \
+ type GRPH_BANK_WIDTH;\
+ type GRPH_BANK_HEIGHT;\
+ type GRPH_MACRO_TILE_ASPECT;\
+ type GRPH_TILE_SPLIT;\
+ type GRPH_MICRO_TILE_MODE;\
+ type GRPH_PIPE_CONFIG;\
+ type GRPH_ARRAY_MODE;\
+ type GRPH_COLOR_EXPANSION_MODE;\
+ type GRPH_SW_MODE; \
+ type GRPH_SE_ENABLE; \
+ type GRPH_NUM_SHADER_ENGINES; \
+ type GRPH_NUM_PIPES; \
+ type GRPH_SECONDARY_SURFACE_ADDRESS_HIGH; \
+ type GRPH_SECONDARY_SURFACE_ADDRESS; \
+ type GRPH_SECONDARY_DFQ_ENABLE; \
+ type GRPH_PRIMARY_SURFACE_ADDRESS_HIGH; \
+ type GRPH_PRIMARY_SURFACE_ADDRESS; \
+ type GRPH_SURFACE_UPDATE_PENDING; \
+ type GRPH_SURFACE_UPDATE_H_RETRACE_EN; \
+ type GRPH_UPDATE_LOCK; \
+ type PIXEL_DURATION; \
+ type URGENCY_WATERMARK_MASK; \
+ type PSTATE_CHANGE_WATERMARK_MASK; \
+ type NB_PSTATE_CHANGE_WATERMARK_MASK; \
+ type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \
+ type URGENCY_LOW_WATERMARK; \
+ type URGENCY_HIGH_WATERMARK; \
+ type NB_PSTATE_CHANGE_ENABLE; \
+ type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \
+ type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
+ type NB_PSTATE_CHANGE_WATERMARK; \
+ type PSTATE_CHANGE_ENABLE; \
+ type PSTATE_CHANGE_URGENT_DURING_REQUEST; \
+ type PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \
+ type PSTATE_CHANGE_WATERMARK; \
+ type STUTTER_ENABLE; \
+ type STUTTER_IGNORE_FBC; \
+ type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \
+ type DMIF_BUFFERS_ALLOCATED; \
+ type DMIF_BUFFERS_ALLOCATION_COMPLETED; \
+ type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */\
+ type FB_BASE; \
+ type FB_TOP; \
+ type AGP_BASE; \
+ type AGP_TOP; \
+ type AGP_BOT; \
+
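+/* The field list is instantiated twice: as uint8_t shifts and as uint32_t
+ * masks, the pairing the REG_* helpers expect. */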
+struct dce_mem_input_shift {
+ MI_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_mem_input_mask {
+ MI_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_mem_input_wa {
+ uint8_t single_head_rdreq_dmif_limit;
+};
+
+struct dce_mem_input {
+ struct mem_input base;
+
+ const struct dce_mem_input_registers *regs;
+ const struct dce_mem_input_shift *shifts;
+ const struct dce_mem_input_mask *masks;
+
+ struct dce_mem_input_wa wa;
+};
+
+void dce_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask);
+
+void dce112_mem_input_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx,
+ int inst,
+ const struct dce_mem_input_registers *regs,
+ const struct dce_mem_input_shift *mi_shift,
+ const struct dce_mem_input_mask *mi_mask);
+
+#endif /*__DCE_MEM_INPUT_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
new file mode 100644
index 000000000000..3931412ab6d3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "basics/conversion.h"
+
+#include "dce_opp.h"
+
+#include "reg_helper.h"
+
+#define REG(reg)\
+ (opp110->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ opp110->opp_shift->field_name, opp110->opp_mask->field_name
+
+#define CTX \
+ opp110->base.ctx
+
+enum {
+ MAX_PWL_ENTRY = 128,
+ MAX_REGIONS_NUMBER = 16
+};
+
+enum {
+ MAX_LUT_ENTRY = 256,
+ MAX_NUMBER_OF_ENTRIES = 256
+};
+
+
+enum {
+ OUTPUT_CSC_MATRIX_SIZE = 12
+};
+
+/**
+ * set_truncation
+ * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
+ * 2) enable truncation
+ * 3) the hardware removes 12-bit FMT support on DCE11 for power-saving reasons.
+ */
+static void set_truncation(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable truncation*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 0,
+ FMT_TRUNCATE_DEPTH, 0,
+ FMT_TRUNCATE_MODE, 0);
+
+
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ /* 8bpc trunc on YCbCr422*/
+ if (params->flags.TRUNCATE_DEPTH == 1)
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH, 1,
+ FMT_TRUNCATE_MODE, 0);
+ else if (params->flags.TRUNCATE_DEPTH == 2)
+ /* 10bpc trunc on YCbCr422*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH, 2,
+ FMT_TRUNCATE_MODE, 0);
+ return;
+ }
+ /* on other format-to do */
+ if (params->flags.TRUNCATE_ENABLED == 0 ||
+ params->flags.TRUNCATE_DEPTH == 2)
+ return;
+ /*Set truncation depth and Enable truncation*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, 1,
+ FMT_TRUNCATE_DEPTH,
+ params->flags.TRUNCATE_DEPTH,
+ FMT_TRUNCATE_MODE,
+ params->flags.TRUNCATE_MODE);
+}
+
+
+/**
+ * set_spatial_dither
+ * 1) set spatial dithering mode: pattern of seed
+ * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
+ * 3) set random seed
+ * 4) set random mode
+ * lfsr is reset every frame or not reset
+ * RGB dithering method
+ * 0: RGB data are all dithered with x^28+x^3+1
+ * 1: R data is dithered with x^28+x^3+1
+ * G data is dithered with x^28+x^9+1
+ * B data is dithered with x^28+x^13+1
+ * enable high pass filter or not
+ * 5) enable spatial dithering
+ */
+static void set_spatial_dither(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable spatial (random) dithering*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_SPATIAL_DITHER_EN, 0,
+ FMT_SPATIAL_DITHER_DEPTH, 0,
+ FMT_SPATIAL_DITHER_MODE, 0);
+
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_HIGHPASS_RANDOM_ENABLE, 0,
+ FMT_FRAME_RANDOM_ENABLE, 0,
+ FMT_RGB_RANDOM_ENABLE, 0);
+
+ REG_UPDATE(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_EN, 0);
+
+ /* no 10bpc on DCE11*/
+ if (params->flags.SPATIAL_DITHER_ENABLED == 0 ||
+ params->flags.SPATIAL_DITHER_DEPTH == 2)
+ return;
+
+ /* only use FRAME_COUNTER_MAX if frameRandom == 1*/
+
+ if (opp110->opp_mask->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX &&
+ opp110->opp_mask->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP) {
+ if (params->flags.FRAME_RANDOM == 1) {
+ if (params->flags.SPATIAL_DITHER_DEPTH == 0 ||
+ params->flags.SPATIAL_DITHER_DEPTH == 1) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2);
+ } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1);
+ } else
+ return;
+ } else {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0);
+ }
+ }
+ /* Set seed for random values for
+ * spatial dithering for R,G,B channels
+ */
+ REG_UPDATE(FMT_DITHER_RAND_R_SEED,
+ FMT_RAND_R_SEED, params->r_seed_value);
+
+ REG_UPDATE(FMT_DITHER_RAND_G_SEED,
+ FMT_RAND_G_SEED, params->g_seed_value);
+
+ REG_UPDATE(FMT_DITHER_RAND_B_SEED,
+ FMT_RAND_B_SEED, params->b_seed_value);
+
+ /* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero
+ * offset for the R/Cr channel, lower 4LSB
+ * is forced to zeros. Typically set to 0
+ * RGB and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero
+ * offset for the G/Y channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB
+ * and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero
+ * offset for the B/Cb channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB and
+ * 0x80000 YCbCr.
+ */
+
+ /* Disable High pass filter
+ * Reset only at startup
+ * Set RGB data dithered with x^28+x^3+1
+ */
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM,
+ FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM,
+ FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM);
+
+ /* Set spatial dithering bit depth
+ * Set spatial dithering mode
+ * (default is Seed pattern AAAA...)
+ * Enable spatial dithering
+ */
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH,
+ FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE,
+ FMT_SPATIAL_DITHER_EN, 1);
+}
+
+/**
+ * set_temporal_dither (frame modulation)
+ * 1) set temporal dither depth
+ * 2) select pattern: from hard-coded pattern or programmable pattern
+ * 3) select optimized strips for BGR or RGB LCD sub-pixel
+ * 4) set s matrix
+ * 5) set t matrix
+ * 6) set grey level for 0.25, 0.5, 0.75
+ * 7) enable temporal dithering
+ */
+
+static void set_temporal_dither(
+ struct dce110_opp *opp110,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable temporal (frame modulation) dithering first*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_EN, 0,
+ FMT_TEMPORAL_DITHER_RESET, 0,
+ FMT_TEMPORAL_DITHER_OFFSET, 0);
+
+ REG_UPDATE_2(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_DEPTH, 0,
+ FMT_TEMPORAL_LEVEL, 0);
+
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_25FRC_SEL, 0,
+ FMT_50FRC_SEL, 0,
+ FMT_75FRC_SEL, 0);
+
+ /* no 10bpc dither on DCE11*/
+ if (params->flags.FRAME_MODULATION_ENABLED == 0 ||
+ params->flags.FRAME_MODULATION_DEPTH == 2)
+ return;
+
+ /* Set temporal dithering depth*/
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_DEPTH, params->flags.FRAME_MODULATION_DEPTH,
+ FMT_TEMPORAL_DITHER_RESET, 0,
+ FMT_TEMPORAL_DITHER_OFFSET, 0);
+
+ /*Select legacy pattern based on FRC and Temporal level*/
+ if (REG(FMT_TEMPORAL_DITHER_PATTERN_CONTROL)) {
+ REG_WRITE(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, 0);
+ /*Set s matrix*/
+ REG_WRITE(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, 0);
+ /*Set t matrix*/
+ REG_WRITE(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, 0);
+ }
+
+ /*Select patterns for 0.25, 0.5 and 0.75 grey level*/
+ REG_UPDATE(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_LEVEL, params->flags.TEMPORAL_LEVEL);
+
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_25FRC_SEL, params->flags.FRC25,
+ FMT_50FRC_SEL, params->flags.FRC50,
+ FMT_75FRC_SEL, params->flags.FRC75);
+
+ /*Enable bit reduction by temporal (frame modulation) dithering*/
+ REG_UPDATE(FMT_BIT_DEPTH_CONTROL,
+ FMT_TEMPORAL_DITHER_EN, 1);
+}
+
+/**
+ * Set Clamping
+ * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
+ * 1 for 8 bpc
+ * 2 for 10 bpc
+ * 3 for 12 bpc
+ * 7 for programmable
+ * 2) Enable clamp if Limited range requested
+ */
+void dce110_opp_set_clamping(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 0,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+
+ switch (params->clamping_level) {
+ case CLAMPING_FULL_RANGE:
+ break;
+ case CLAMPING_LIMITED_RANGE_8BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 1);
+ break;
+ case CLAMPING_LIMITED_RANGE_10BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 2);
+ break;
+ case CLAMPING_LIMITED_RANGE_12BPC:
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 3);
+ break;
+ case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
+ /*Set clamp control*/
+ REG_SET_2(FMT_CLAMP_CNTL, 0,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 7);
+
+ /*set the defaults*/
+ REG_SET_2(FMT_CLAMP_COMPONENT_R, 0,
+ FMT_CLAMP_LOWER_R, 0x10,
+ FMT_CLAMP_UPPER_R, 0xFEF);
+
+ REG_SET_2(FMT_CLAMP_COMPONENT_G, 0,
+ FMT_CLAMP_LOWER_G, 0x10,
+ FMT_CLAMP_UPPER_G, 0xFEF);
+
+ REG_SET_2(FMT_CLAMP_COMPONENT_B, 0,
+ FMT_CLAMP_LOWER_B, 0x10,
+ FMT_CLAMP_UPPER_B, 0xFEF);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * set_pixel_encoding
+ *
+ * Set Pixel Encoding
+ * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
+ * 1: YCbCr 4:2:2
+ */
+static void set_pixel_encoding(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ if (opp110->opp_mask->FMT_CBCR_BIT_REDUCTION_BYPASS)
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_SUBSAMPLING_MODE, 0,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 0);
+ else
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 0,
+ FMT_SUBSAMPLING_MODE, 0);
+
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 1,
+ FMT_SUBSAMPLING_ORDER, 0);
+ }
+ if (params->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ REG_UPDATE_3(FMT_CONTROL,
+ FMT_PIXEL_ENCODING, 2,
+ FMT_SUBSAMPLING_MODE, 2,
+ FMT_CBCR_BIT_REDUCTION_BYPASS, 1);
+ }
+
+}
+
+void dce110_opp_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ set_truncation(opp110, params);
+ set_spatial_dither(opp110, params);
+ set_temporal_dither(opp110, params);
+}
+
+void dce110_opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ dce110_opp_set_clamping(opp110, params);
+ set_pixel_encoding(opp110, params);
+}
+
+static void program_formatter_420_memory(struct output_pixel_processor *opp)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+ uint32_t fmt_mem_cntl_value;
+
+ /* Program source select*/
+ /* Use HW default source select for FMT_MEMORYx_CONTROL */
+ /* Use that value for FMT_SRC_SELECT as well*/
+ REG_GET(CONTROL,
+ FMT420_MEM0_SOURCE_SEL, &fmt_mem_cntl_value);
+
+ REG_UPDATE(FMT_CONTROL,
+ FMT_SRC_SELECT, fmt_mem_cntl_value);
+
+ /* Turn on the memory */
+ REG_UPDATE(CONTROL,
+ FMT420_MEM0_PWR_FORCE, 0);
+}
+
+void dce110_opp_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 0,
+ FMT_DYNAMIC_EXP_MODE, 0);
+
+ /*00 - 10-bit -> 12-bit dynamic expansion*/
+ /*01 - 8-bit -> 12-bit dynamic expansion*/
+ if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ switch (color_dpth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 1);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE_2(
+ FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,/*otherwise last two bits are zero*/
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void program_formatter_reset_dig_resync_fifo(struct output_pixel_processor *opp)
+{
+ struct dce110_opp *opp110 = TO_DCE110_OPP(opp);
+
+ /* clear previous phase lock status*/
+ REG_UPDATE(FMT_CONTROL,
+ FMT_420_PIXEL_PHASE_LOCKED_CLEAR, 1);
+
+ /* poll until FMT_420_PIXEL_PHASE_LOCKED become 1*/
+ REG_WAIT(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, 1, 10, 10);
+
+}
+
+void dce110_opp_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ /* dithering is affected by <CrtcSourceSelect>, hence should be
+ * programmed afterwards */
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ program_formatter_420_memory(opp);
+
+ dce110_opp_program_bit_depth_reduction(
+ opp,
+ fmt_bit_depth);
+
+ dce110_opp_program_clamping_and_pixel_encoding(
+ opp,
+ clamping);
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ program_formatter_reset_dig_resync_fifo(opp);
+
+ return;
+}
+
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static const struct opp_funcs funcs = {
+ .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
+ .opp_destroy = dce110_opp_destroy,
+ .opp_program_fmt = dce110_opp_program_fmt,
+ .opp_program_bit_depth_reduction = dce110_opp_program_bit_depth_reduction
+};
+
+void dce110_opp_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_opp_registers *regs,
+ const struct dce_opp_shift *opp_shift,
+ const struct dce_opp_mask *opp_mask)
+{
+ opp110->base.funcs = &funcs;
+
+ opp110->base.ctx = ctx;
+
+ opp110->base.inst = inst;
+
+ opp110->regs = regs;
+ opp110->opp_shift = opp_shift;
+ opp110->opp_mask = opp_mask;
+}
+
+void dce110_opp_destroy(struct output_pixel_processor **opp)
+{
+ if (*opp)
+ kfree(FROM_DCE11_OPP(*opp));
+ *opp = NULL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
new file mode 100644
index 000000000000..2ab0147cbd9d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h
@@ -0,0 +1,310 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCE_H__
+#define __DC_OPP_DCE_H__
+
+#include "dc_types.h"
+#include "opp.h"
+#include "core_types.h"
+
+#define FROM_DCE11_OPP(opp)\
+ container_of(opp, struct dce110_opp, base)
+
+enum dce110_opp_reg_type {
+ DCE110_OPP_REG_DCP = 0,
+ DCE110_OPP_REG_DCFE,
+ DCE110_OPP_REG_FMT,
+
+ DCE110_OPP_REG_MAX
+};
+
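+/* Per-instance FMT register lists. The DCE8/10/11/11.2 variants carry the
+ * legacy temporal dither pattern registers; DCE11.2 and DCE12 add the
+ * FMT memory control used for 4:2:0 output. */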
+#define OPP_COMMON_REG_LIST_BASE(id) \
+ SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \
+ SRI(FMT_CONTROL, FMT, id), \
+ SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI(FMT_CLAMP_CNTL, FMT, id), \
+ SRI(FMT_CLAMP_COMPONENT_R, FMT, id), \
+ SRI(FMT_CLAMP_COMPONENT_G, FMT, id), \
+ SRI(FMT_CLAMP_COMPONENT_B, FMT, id)
+
+#define OPP_DCE_80_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id)
+
+#define OPP_DCE_100_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id)
+
+#define OPP_DCE_110_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id)
+
+#define OPP_DCE_112_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(FMT_TEMPORAL_DITHER_PATTERN_CONTROL, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX, FMT, id), \
+ SRI(FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX, FMT, id), \
+ SRI(CONTROL, FMT_MEMORY, id)
+
+#define OPP_DCE_120_REG_LIST(id) \
+ OPP_COMMON_REG_LIST_BASE(id), \
+ SRI(CONTROL, FMT_MEMORY, id)
+
+#define OPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh),\
+ OPP_SF(FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
+ OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_LEVEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_50FRC_SEL, mask_sh),\
+ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_75FRC_SEL, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SRC_SELECT, mask_sh),\
+ OPP_SF(FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh),\
+ OPP_SF(FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_R, FMT_CLAMP_LOWER_R, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_R, FMT_CLAMP_UPPER_R, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_G, FMT_CLAMP_LOWER_G, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_G, FMT_CLAMP_UPPER_G, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_B, FMT_CLAMP_LOWER_B, mask_sh),\
+ OPP_SF(FMT_CLAMP_COMPONENT_B, FMT_CLAMP_UPPER_B, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SUBSAMPLING_ORDER, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_110(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_100(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_112(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_SOURCE_SEL, mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_PWR_FORCE, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED_CLEAR, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_80(mask_sh)\
+ OPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define OPP_COMMON_MASK_SH_LIST_DCE_120(mask_sh)\
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_LEVEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_25FRC_SEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_50FRC_SEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_75FRC_SEL, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh),\
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh),\
+ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\
+ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\
+ OPP_SF(FMT0_FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_SOURCE_SEL, mask_sh),\
+ OPP_SF(FMT_MEMORY0_CONTROL, FMT420_MEM0_PWR_FORCE, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SRC_SELECT, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED_CLEAR, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_420_PIXEL_PHASE_LOCKED, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_R, FMT_CLAMP_LOWER_R, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_R, FMT_CLAMP_UPPER_R, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_G, FMT_CLAMP_LOWER_G, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_G, FMT_CLAMP_UPPER_G, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_B, FMT_CLAMP_LOWER_B, mask_sh),\
+ OPP_SF(FMT0_FMT_CLAMP_COMPONENT_B, FMT_CLAMP_UPPER_B, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_ORDER, mask_sh),\
+ OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh)
+
+#define OPP_REG_FIELD_LIST(type) \
+ type FMT_DYNAMIC_EXP_EN; \
+ type FMT_DYNAMIC_EXP_MODE; \
+ type FMT_TRUNCATE_EN; \
+ type FMT_TRUNCATE_DEPTH; \
+ type FMT_TRUNCATE_MODE; \
+ type FMT_SPATIAL_DITHER_EN; \
+ type FMT_SPATIAL_DITHER_DEPTH; \
+ type FMT_SPATIAL_DITHER_MODE; \
+ type FMT_TEMPORAL_DITHER_EN; \
+ type FMT_TEMPORAL_DITHER_RESET; \
+ type FMT_TEMPORAL_DITHER_OFFSET; \
+ type FMT_TEMPORAL_DITHER_DEPTH; \
+ type FMT_TEMPORAL_LEVEL; \
+ type FMT_25FRC_SEL; \
+ type FMT_50FRC_SEL; \
+ type FMT_75FRC_SEL; \
+ type FMT_HIGHPASS_RANDOM_ENABLE; \
+ type FMT_FRAME_RANDOM_ENABLE; \
+ type FMT_RGB_RANDOM_ENABLE; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP; \
+ type FMT_STEREOSYNC_OVERRIDE; \
+ type FMT_RAND_R_SEED; \
+ type FMT_RAND_G_SEED; \
+ type FMT_RAND_B_SEED; \
+ type FMT420_MEM0_SOURCE_SEL; \
+ type FMT420_MEM0_PWR_FORCE; \
+ type FMT_SRC_SELECT; \
+ type FMT_420_PIXEL_PHASE_LOCKED_CLEAR; \
+ type FMT_420_PIXEL_PHASE_LOCKED; \
+ type FMT_CLAMP_DATA_EN; \
+ type FMT_CLAMP_COLOR_FORMAT; \
+ type FMT_CLAMP_LOWER_R; \
+ type FMT_CLAMP_UPPER_R; \
+ type FMT_CLAMP_LOWER_G; \
+ type FMT_CLAMP_UPPER_G; \
+ type FMT_CLAMP_LOWER_B; \
+ type FMT_CLAMP_UPPER_B; \
+ type FMT_PIXEL_ENCODING; \
+ type FMT_SUBSAMPLING_ORDER; \
+ type FMT_SUBSAMPLING_MODE; \
+ type FMT_CBCR_BIT_REDUCTION_BYPASS;\
+
+struct dce_opp_shift {
+ OPP_REG_FIELD_LIST(uint8_t)
+};
+
+struct dce_opp_mask {
+ OPP_REG_FIELD_LIST(uint32_t)
+};
+
+struct dce_opp_registers {
+ uint32_t FMT_DYNAMIC_EXP_CNTL;
+ uint32_t FMT_BIT_DEPTH_CONTROL;
+ uint32_t FMT_CONTROL;
+ uint32_t FMT_DITHER_RAND_R_SEED;
+ uint32_t FMT_DITHER_RAND_G_SEED;
+ uint32_t FMT_DITHER_RAND_B_SEED;
+ uint32_t FMT_TEMPORAL_DITHER_PATTERN_CONTROL;
+ uint32_t FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_S_MATRIX;
+ uint32_t FMT_TEMPORAL_DITHER_PROGRAMMABLE_PATTERN_T_MATRIX;
+ uint32_t CONTROL;
+ uint32_t FMT_CLAMP_CNTL;
+ uint32_t FMT_CLAMP_COMPONENT_R;
+ uint32_t FMT_CLAMP_COMPONENT_G;
+ uint32_t FMT_CLAMP_COMPONENT_B;
+};
+
+/* OPP RELATED */
+#define TO_DCE110_OPP(opp)\
+ container_of(opp, struct dce110_opp, base)
+
+struct dce110_opp {
+ struct output_pixel_processor base;
+ const struct dce_opp_registers *regs;
+ const struct dce_opp_shift *opp_shift;
+ const struct dce_opp_mask *opp_mask;
+};
+
+void dce110_opp_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_opp_registers *regs,
+ const struct dce_opp_shift *opp_shift,
+ const struct dce_opp_mask *opp_mask);
+
+void dce110_opp_destroy(struct output_pixel_processor **opp);
+
+
+
+/* FORMATTER RELATED */
+void dce110_opp_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params);
+
+void dce110_opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params);
+
+void dce110_opp_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal);
+
+void dce110_opp_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+void dce110_opp_set_clamping(
+ struct dce110_opp *opp110,
+ const struct clamping_and_pixel_encoding_params *params);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
new file mode 100644
index 000000000000..6243450b41b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -0,0 +1,1119 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "transform.h"
+
+static const uint16_t filter_2tap_16p[18] = {
+ 4096, 0,
+ 3840, 256,
+ 3584, 512,
+ 3328, 768,
+ 3072, 1024,
+ 2816, 1280,
+ 2560, 1536,
+ 2304, 1792,
+ 2048, 2048
+};
+
+static const uint16_t filter_3tap_16p_upscale[27] = {
+ 2048, 2048, 0,
+ 1708, 2424, 16348,
+ 1372, 2796, 16308,
+ 1056, 3148, 16272,
+ 768, 3464, 16244,
+ 512, 3728, 16236,
+ 296, 3928, 16252,
+ 124, 4052, 16296,
+ 0, 4096, 0
+};
+
+static const uint16_t filter_3tap_16p_117[27] = {
+ 2048, 2048, 0,
+ 1824, 2276, 16376,
+ 1600, 2496, 16380,
+ 1376, 2700, 16,
+ 1156, 2880, 52,
+ 948, 3032, 108,
+ 756, 3144, 192,
+ 580, 3212, 296,
+ 428, 3236, 428
+};
+
+static const uint16_t filter_3tap_16p_150[27] = {
+ 2048, 2048, 0,
+ 1872, 2184, 36,
+ 1692, 2308, 88,
+ 1516, 2420, 156,
+ 1340, 2516, 236,
+ 1168, 2592, 328,
+ 1004, 2648, 440,
+ 844, 2684, 560,
+ 696, 2696, 696
+};
+
+static const uint16_t filter_3tap_16p_183[27] = {
+ 2048, 2048, 0,
+ 1892, 2104, 92,
+ 1744, 2152, 196,
+ 1592, 2196, 300,
+ 1448, 2232, 412,
+ 1304, 2256, 528,
+ 1168, 2276, 648,
+ 1032, 2288, 772,
+ 900, 2292, 900
+};
+
+static const uint16_t filter_4tap_16p_upscale[36] = {
+ 0, 4096, 0, 0,
+ 16240, 4056, 180, 16380,
+ 16136, 3952, 404, 16364,
+ 16072, 3780, 664, 16344,
+ 16040, 3556, 952, 16312,
+ 16036, 3284, 1268, 16272,
+ 16052, 2980, 1604, 16224,
+ 16084, 2648, 1952, 16176,
+ 16128, 2304, 2304, 16128
+};
+
+static const uint16_t filter_4tap_16p_117[36] = {
+ 428, 3236, 428, 0,
+ 276, 3232, 604, 16364,
+ 148, 3184, 800, 16340,
+ 44, 3104, 1016, 16312,
+ 16344, 2984, 1244, 16284,
+ 16284, 2832, 1488, 16256,
+ 16244, 2648, 1732, 16236,
+ 16220, 2440, 1976, 16220,
+ 16212, 2216, 2216, 16212
+};
+
+static const uint16_t filter_4tap_16p_150[36] = {
+ 696, 2700, 696, 0,
+ 560, 2700, 848, 16364,
+ 436, 2676, 1008, 16348,
+ 328, 2628, 1180, 16336,
+ 232, 2556, 1356, 16328,
+ 152, 2460, 1536, 16328,
+ 84, 2344, 1716, 16332,
+ 28, 2208, 1888, 16348,
+ 16376, 2052, 2052, 16376
+};
+
+static const uint16_t filter_4tap_16p_183[36] = {
+ 940, 2208, 940, 0,
+ 832, 2200, 1052, 4,
+ 728, 2180, 1164, 16,
+ 628, 2148, 1280, 36,
+ 536, 2100, 1392, 60,
+ 448, 2044, 1504, 92,
+ 368, 1976, 1612, 132,
+ 296, 1900, 1716, 176,
+ 232, 1812, 1812, 232
+};
+
+static const uint16_t filter_2tap_64p[66] = {
+ 4096, 0,
+ 4032, 64,
+ 3968, 128,
+ 3904, 192,
+ 3840, 256,
+ 3776, 320,
+ 3712, 384,
+ 3648, 448,
+ 3584, 512,
+ 3520, 576,
+ 3456, 640,
+ 3392, 704,
+ 3328, 768,
+ 3264, 832,
+ 3200, 896,
+ 3136, 960,
+ 3072, 1024,
+ 3008, 1088,
+ 2944, 1152,
+ 2880, 1216,
+ 2816, 1280,
+ 2752, 1344,
+ 2688, 1408,
+ 2624, 1472,
+ 2560, 1536,
+ 2496, 1600,
+ 2432, 1664,
+ 2368, 1728,
+ 2304, 1792,
+ 2240, 1856,
+ 2176, 1920,
+ 2112, 1984,
+ 2048, 2048 };
+
+static const uint16_t filter_3tap_64p_upscale[99] = {
+ 2048, 2048, 0,
+ 1960, 2140, 16376,
+ 1876, 2236, 16364,
+ 1792, 2328, 16356,
+ 1708, 2424, 16348,
+ 1620, 2516, 16336,
+ 1540, 2612, 16328,
+ 1456, 2704, 16316,
+ 1372, 2796, 16308,
+ 1292, 2884, 16296,
+ 1212, 2976, 16288,
+ 1136, 3060, 16280,
+ 1056, 3148, 16272,
+ 984, 3228, 16264,
+ 908, 3312, 16256,
+ 836, 3388, 16248,
+ 768, 3464, 16244,
+ 700, 3536, 16240,
+ 636, 3604, 16236,
+ 572, 3668, 16236,
+ 512, 3728, 16236,
+ 456, 3784, 16236,
+ 400, 3836, 16240,
+ 348, 3884, 16244,
+ 296, 3928, 16252,
+ 252, 3964, 16260,
+ 204, 4000, 16268,
+ 164, 4028, 16284,
+ 124, 4052, 16296,
+ 88, 4072, 16316,
+ 56, 4084, 16336,
+ 24, 4092, 16356,
+ 0, 4096, 0
+};
+
+static const uint16_t filter_3tap_64p_117[99] = {
+ 2048, 2048, 0,
+ 1992, 2104, 16380,
+ 1936, 2160, 16380,
+ 1880, 2220, 16376,
+ 1824, 2276, 16376,
+ 1768, 2332, 16376,
+ 1712, 2388, 16376,
+ 1656, 2444, 16376,
+ 1600, 2496, 16380,
+ 1544, 2548, 0,
+ 1488, 2600, 4,
+ 1432, 2652, 8,
+ 1376, 2700, 16,
+ 1320, 2748, 20,
+ 1264, 2796, 32,
+ 1212, 2840, 40,
+ 1156, 2880, 52,
+ 1104, 2920, 64,
+ 1052, 2960, 80,
+ 1000, 2996, 92,
+ 948, 3032, 108,
+ 900, 3060, 128,
+ 852, 3092, 148,
+ 804, 3120, 168,
+ 756, 3144, 192,
+ 712, 3164, 216,
+ 668, 3184, 240,
+ 624, 3200, 268,
+ 580, 3212, 296,
+ 540, 3220, 328,
+ 500, 3228, 360,
+ 464, 3232, 392,
+ 428, 3236, 428
+};
+
+static const uint16_t filter_3tap_64p_150[99] = {
+ 2048, 2048, 0,
+ 2004, 2080, 8,
+ 1960, 2116, 16,
+ 1916, 2148, 28,
+ 1872, 2184, 36,
+ 1824, 2216, 48,
+ 1780, 2248, 60,
+ 1736, 2280, 76,
+ 1692, 2308, 88,
+ 1648, 2336, 104,
+ 1604, 2368, 120,
+ 1560, 2392, 136,
+ 1516, 2420, 156,
+ 1472, 2444, 172,
+ 1428, 2472, 192,
+ 1384, 2492, 212,
+ 1340, 2516, 236,
+ 1296, 2536, 256,
+ 1252, 2556, 280,
+ 1212, 2576, 304,
+ 1168, 2592, 328,
+ 1124, 2608, 356,
+ 1084, 2624, 384,
+ 1044, 2636, 412,
+ 1004, 2648, 440,
+ 964, 2660, 468,
+ 924, 2668, 500,
+ 884, 2676, 528,
+ 844, 2684, 560,
+ 808, 2688, 596,
+ 768, 2692, 628,
+ 732, 2696, 664,
+ 696, 2696, 696
+};
+
+static const uint16_t filter_3tap_64p_183[99] = {
+ 2048, 2048, 0,
+ 2008, 2060, 20,
+ 1968, 2076, 44,
+ 1932, 2088, 68,
+ 1892, 2104, 92,
+ 1856, 2116, 120,
+ 1816, 2128, 144,
+ 1780, 2140, 168,
+ 1744, 2152, 196,
+ 1704, 2164, 220,
+ 1668, 2176, 248,
+ 1632, 2188, 272,
+ 1592, 2196, 300,
+ 1556, 2204, 328,
+ 1520, 2216, 356,
+ 1484, 2224, 384,
+ 1448, 2232, 412,
+ 1412, 2240, 440,
+ 1376, 2244, 468,
+ 1340, 2252, 496,
+ 1304, 2256, 528,
+ 1272, 2264, 556,
+ 1236, 2268, 584,
+ 1200, 2272, 616,
+ 1168, 2276, 648,
+ 1132, 2280, 676,
+ 1100, 2284, 708,
+ 1064, 2288, 740,
+ 1032, 2288, 772,
+ 996, 2292, 800,
+ 964, 2292, 832,
+ 932, 2292, 868,
+ 900, 2292, 900
+};
+
+static const uint16_t filter_4tap_64p_upscale[132] = {
+ 0, 4096, 0, 0,
+ 16344, 4092, 40, 0,
+ 16308, 4084, 84, 16380,
+ 16272, 4072, 132, 16380,
+ 16240, 4056, 180, 16380,
+ 16212, 4036, 232, 16376,
+ 16184, 4012, 288, 16372,
+ 16160, 3984, 344, 16368,
+ 16136, 3952, 404, 16364,
+ 16116, 3916, 464, 16360,
+ 16100, 3872, 528, 16356,
+ 16084, 3828, 596, 16348,
+ 16072, 3780, 664, 16344,
+ 16060, 3728, 732, 16336,
+ 16052, 3676, 804, 16328,
+ 16044, 3616, 876, 16320,
+ 16040, 3556, 952, 16312,
+ 16036, 3492, 1028, 16300,
+ 16032, 3424, 1108, 16292,
+ 16032, 3356, 1188, 16280,
+ 16036, 3284, 1268, 16272,
+ 16036, 3212, 1352, 16260,
+ 16040, 3136, 1436, 16248,
+ 16044, 3056, 1520, 16236,
+ 16052, 2980, 1604, 16224,
+ 16060, 2896, 1688, 16212,
+ 16064, 2816, 1776, 16200,
+ 16076, 2732, 1864, 16188,
+ 16084, 2648, 1952, 16176,
+ 16092, 2564, 2040, 16164,
+ 16104, 2476, 2128, 16152,
+ 16116, 2388, 2216, 16140,
+ 16128, 2304, 2304, 16128 };
+
+static const uint16_t filter_4tap_64p_117[132] = {
+ 420, 3248, 420, 0,
+ 380, 3248, 464, 16380,
+ 344, 3248, 508, 16372,
+ 308, 3248, 552, 16368,
+ 272, 3240, 596, 16364,
+ 236, 3236, 644, 16356,
+ 204, 3224, 692, 16352,
+ 172, 3212, 744, 16344,
+ 144, 3196, 796, 16340,
+ 116, 3180, 848, 16332,
+ 88, 3160, 900, 16324,
+ 60, 3136, 956, 16320,
+ 36, 3112, 1012, 16312,
+ 16, 3084, 1068, 16304,
+ 16380, 3056, 1124, 16296,
+ 16360, 3024, 1184, 16292,
+ 16340, 2992, 1244, 16284,
+ 16324, 2956, 1304, 16276,
+ 16308, 2920, 1364, 16268,
+ 16292, 2880, 1424, 16264,
+ 16280, 2836, 1484, 16256,
+ 16268, 2792, 1548, 16252,
+ 16256, 2748, 1608, 16244,
+ 16248, 2700, 1668, 16240,
+ 16240, 2652, 1732, 16232,
+ 16232, 2604, 1792, 16228,
+ 16228, 2552, 1856, 16224,
+ 16220, 2500, 1916, 16220,
+ 16216, 2444, 1980, 16216,
+ 16216, 2388, 2040, 16216,
+ 16212, 2332, 2100, 16212,
+ 16212, 2276, 2160, 16212,
+ 16212, 2220, 2220, 16212 };
+
+static const uint16_t filter_4tap_64p_150[132] = {
+ 696, 2700, 696, 0,
+ 660, 2704, 732, 16380,
+ 628, 2704, 768, 16376,
+ 596, 2704, 804, 16372,
+ 564, 2700, 844, 16364,
+ 532, 2696, 884, 16360,
+ 500, 2692, 924, 16356,
+ 472, 2684, 964, 16352,
+ 440, 2676, 1004, 16352,
+ 412, 2668, 1044, 16348,
+ 384, 2656, 1088, 16344,
+ 360, 2644, 1128, 16340,
+ 332, 2632, 1172, 16336,
+ 308, 2616, 1216, 16336,
+ 284, 2600, 1260, 16332,
+ 260, 2580, 1304, 16332,
+ 236, 2560, 1348, 16328,
+ 216, 2540, 1392, 16328,
+ 196, 2516, 1436, 16328,
+ 176, 2492, 1480, 16324,
+ 156, 2468, 1524, 16324,
+ 136, 2440, 1568, 16328,
+ 120, 2412, 1612, 16328,
+ 104, 2384, 1656, 16328,
+ 88, 2352, 1700, 16332,
+ 72, 2324, 1744, 16332,
+ 60, 2288, 1788, 16336,
+ 48, 2256, 1828, 16340,
+ 36, 2220, 1872, 16344,
+ 24, 2184, 1912, 16352,
+ 12, 2148, 1952, 16356,
+ 4, 2112, 1996, 16364,
+ 16380, 2072, 2036, 16372 };
+
+static const uint16_t filter_4tap_64p_183[132] = {
+ 944, 2204, 944, 0,
+ 916, 2204, 972, 0,
+ 888, 2200, 996, 0,
+ 860, 2200, 1024, 4,
+ 832, 2196, 1052, 4,
+ 808, 2192, 1080, 8,
+ 780, 2188, 1108, 12,
+ 756, 2180, 1140, 12,
+ 728, 2176, 1168, 16,
+ 704, 2168, 1196, 20,
+ 680, 2160, 1224, 24,
+ 656, 2152, 1252, 28,
+ 632, 2144, 1280, 36,
+ 608, 2132, 1308, 40,
+ 584, 2120, 1336, 48,
+ 560, 2112, 1364, 52,
+ 536, 2096, 1392, 60,
+ 516, 2084, 1420, 68,
+ 492, 2072, 1448, 76,
+ 472, 2056, 1476, 84,
+ 452, 2040, 1504, 92,
+ 428, 2024, 1532, 100,
+ 408, 2008, 1560, 112,
+ 392, 1992, 1584, 120,
+ 372, 1972, 1612, 132,
+ 352, 1956, 1636, 144,
+ 336, 1936, 1664, 156,
+ 316, 1916, 1688, 168,
+ 300, 1896, 1712, 180,
+ 284, 1876, 1736, 192,
+ 268, 1852, 1760, 208,
+ 252, 1832, 1784, 220,
+ 236, 1808, 1808, 236 };
+
+static const uint16_t filter_5tap_64p_upscale[165] = {
+ 15936, 2496, 2496, 15936, 0,
+ 15948, 2404, 2580, 15924, 0,
+ 15960, 2312, 2664, 15912, 4,
+ 15976, 2220, 2748, 15904, 8,
+ 15992, 2128, 2832, 15896, 12,
+ 16004, 2036, 2912, 15888, 16,
+ 16020, 1944, 2992, 15880, 20,
+ 16036, 1852, 3068, 15876, 20,
+ 16056, 1760, 3140, 15876, 24,
+ 16072, 1668, 3216, 15872, 28,
+ 16088, 1580, 3284, 15872, 32,
+ 16104, 1492, 3352, 15876, 32,
+ 16120, 1404, 3420, 15876, 36,
+ 16140, 1316, 3480, 15884, 40,
+ 16156, 1228, 3540, 15892, 40,
+ 16172, 1144, 3600, 15900, 40,
+ 16188, 1060, 3652, 15908, 44,
+ 16204, 980, 3704, 15924, 44,
+ 16220, 900, 3756, 15936, 44,
+ 16236, 824, 3800, 15956, 44,
+ 16248, 744, 3844, 15972, 44,
+ 16264, 672, 3884, 15996, 44,
+ 16276, 600, 3920, 16020, 44,
+ 16292, 528, 3952, 16044, 40,
+ 16304, 460, 3980, 16072, 40,
+ 16316, 396, 4008, 16104, 36,
+ 16328, 332, 4032, 16136, 32,
+ 16336, 272, 4048, 16172, 28,
+ 16348, 212, 4064, 16208, 24,
+ 16356, 156, 4080, 16248, 16,
+ 16368, 100, 4088, 16292, 12,
+ 16376, 48, 4092, 16336, 4,
+ 0, 0, 4096, 0, 0 };
+
+static const uint16_t filter_5tap_64p_117[165] = {
+ 16056, 2372, 2372, 16056, 0,
+ 16052, 2312, 2432, 16060, 0,
+ 16052, 2252, 2488, 16064, 0,
+ 16052, 2188, 2548, 16072, 0,
+ 16052, 2124, 2600, 16076, 0,
+ 16052, 2064, 2656, 16088, 0,
+ 16052, 2000, 2708, 16096, 0,
+ 16056, 1932, 2760, 16108, 0,
+ 16060, 1868, 2808, 16120, 0,
+ 16064, 1804, 2856, 16132, 0,
+ 16068, 1740, 2904, 16148, 16380,
+ 16076, 1676, 2948, 16164, 16380,
+ 16080, 1612, 2992, 16180, 16376,
+ 16088, 1544, 3032, 16200, 16372,
+ 16096, 1480, 3072, 16220, 16372,
+ 16104, 1420, 3108, 16244, 16368,
+ 16112, 1356, 3144, 16268, 16364,
+ 16120, 1292, 3180, 16292, 16360,
+ 16128, 1232, 3212, 16320, 16356,
+ 16136, 1168, 3240, 16344, 16352,
+ 16144, 1108, 3268, 16376, 16344,
+ 16156, 1048, 3292, 20, 16340,
+ 16164, 988, 3316, 52, 16332,
+ 16172, 932, 3336, 88, 16328,
+ 16184, 872, 3356, 124, 16320,
+ 16192, 816, 3372, 160, 16316,
+ 16204, 760, 3388, 196, 16308,
+ 16212, 708, 3400, 236, 16300,
+ 16220, 656, 3412, 276, 16292,
+ 16232, 604, 3420, 320, 16284,
+ 16240, 552, 3424, 364, 16276,
+ 16248, 504, 3428, 408, 16268,
+ 16256, 456, 3428, 456, 16256 };
+
+static const uint16_t filter_5tap_64p_150[165] = {
+ 16368, 2064, 2064, 16368, 0,
+ 16352, 2028, 2100, 16380, 16380,
+ 16340, 1996, 2132, 12, 16376,
+ 16328, 1960, 2168, 24, 16376,
+ 16316, 1924, 2204, 44, 16372,
+ 16308, 1888, 2236, 60, 16368,
+ 16296, 1848, 2268, 76, 16364,
+ 16288, 1812, 2300, 96, 16360,
+ 16280, 1772, 2328, 116, 16356,
+ 16272, 1736, 2360, 136, 16352,
+ 16268, 1696, 2388, 160, 16348,
+ 16260, 1656, 2416, 180, 16344,
+ 16256, 1616, 2440, 204, 16340,
+ 16248, 1576, 2464, 228, 16336,
+ 16244, 1536, 2492, 252, 16332,
+ 16240, 1496, 2512, 276, 16324,
+ 16240, 1456, 2536, 304, 16320,
+ 16236, 1416, 2556, 332, 16316,
+ 16232, 1376, 2576, 360, 16312,
+ 16232, 1336, 2592, 388, 16308,
+ 16232, 1296, 2612, 416, 16300,
+ 16232, 1256, 2628, 448, 16296,
+ 16232, 1216, 2640, 480, 16292,
+ 16232, 1172, 2652, 512, 16288,
+ 16232, 1132, 2664, 544, 16284,
+ 16232, 1092, 2676, 576, 16280,
+ 16236, 1056, 2684, 608, 16272,
+ 16236, 1016, 2692, 644, 16268,
+ 16240, 976, 2700, 680, 16264,
+ 16240, 936, 2704, 712, 16260,
+ 16244, 900, 2708, 748, 16256,
+ 16248, 860, 2708, 788, 16252,
+ 16248, 824, 2708, 824, 16248 };
+
+static const uint16_t filter_5tap_64p_183[165] = {
+ 228, 1816, 1816, 228, 0,
+ 216, 1792, 1836, 248, 16380,
+ 200, 1772, 1860, 264, 16376,
+ 184, 1748, 1884, 280, 16376,
+ 168, 1728, 1904, 300, 16372,
+ 156, 1704, 1928, 316, 16368,
+ 144, 1680, 1948, 336, 16364,
+ 128, 1656, 1968, 356, 16364,
+ 116, 1632, 1988, 376, 16360,
+ 104, 1604, 2008, 396, 16356,
+ 96, 1580, 2024, 416, 16356,
+ 84, 1556, 2044, 440, 16352,
+ 72, 1528, 2060, 460, 16348,
+ 64, 1504, 2076, 484, 16348,
+ 52, 1476, 2092, 504, 16344,
+ 44, 1448, 2104, 528, 16344,
+ 36, 1424, 2120, 552, 16340,
+ 28, 1396, 2132, 576, 16340,
+ 20, 1368, 2144, 600, 16340,
+ 12, 1340, 2156, 624, 16336,
+ 4, 1312, 2168, 652, 16336,
+ 0, 1284, 2180, 676, 16336,
+ 16376, 1256, 2188, 700, 16332,
+ 16372, 1228, 2196, 728, 16332,
+ 16368, 1200, 2204, 752, 16332,
+ 16364, 1172, 2212, 780, 16332,
+ 16356, 1144, 2216, 808, 16332,
+ 16352, 1116, 2220, 836, 16332,
+ 16352, 1084, 2224, 860, 16332,
+ 16348, 1056, 2228, 888, 16336,
+ 16344, 1028, 2232, 916, 16336,
+ 16340, 1000, 2232, 944, 16336,
+ 16340, 972, 2232, 972, 16340 };
+
+static const uint16_t filter_6tap_64p_upscale[198] = {
+ 0, 0, 4092, 0, 0, 0,
+ 12, 16332, 4092, 52, 16368, 0,
+ 24, 16280, 4088, 108, 16356, 0,
+ 36, 16236, 4080, 168, 16340, 0,
+ 44, 16188, 4064, 228, 16324, 0,
+ 56, 16148, 4052, 292, 16308, 0,
+ 64, 16108, 4032, 356, 16292, 4,
+ 72, 16072, 4008, 424, 16276, 4,
+ 80, 16036, 3980, 492, 16256, 4,
+ 88, 16004, 3952, 564, 16240, 8,
+ 96, 15972, 3920, 636, 16220, 8,
+ 100, 15944, 3884, 712, 16204, 12,
+ 108, 15916, 3844, 788, 16184, 16,
+ 112, 15896, 3800, 864, 16164, 20,
+ 116, 15872, 3756, 944, 16144, 20,
+ 120, 15852, 3708, 1024, 16124, 24,
+ 120, 15836, 3656, 1108, 16104, 28,
+ 124, 15824, 3600, 1192, 16084, 32,
+ 124, 15808, 3544, 1276, 16064, 36,
+ 124, 15800, 3484, 1360, 16044, 40,
+ 128, 15792, 3420, 1448, 16024, 44,
+ 128, 15784, 3352, 1536, 16004, 48,
+ 124, 15780, 3288, 1624, 15988, 52,
+ 124, 15776, 3216, 1712, 15968, 56,
+ 124, 15776, 3144, 1800, 15948, 64,
+ 120, 15776, 3068, 1888, 15932, 68,
+ 120, 15780, 2992, 1976, 15912, 72,
+ 116, 15784, 2916, 2064, 15896, 76,
+ 112, 15792, 2836, 2152, 15880, 80,
+ 108, 15796, 2752, 2244, 15868, 84,
+ 104, 15804, 2672, 2328, 15852, 88,
+ 104, 15816, 2588, 2416, 15840, 92,
+ 100, 15828, 2504, 2504, 15828, 100 };
+
+static const uint16_t filter_6tap_64p_117[198] = {
+ 16168, 476, 3568, 476, 16168, 0,
+ 16180, 428, 3564, 528, 16156, 0,
+ 16192, 376, 3556, 584, 16144, 4,
+ 16204, 328, 3548, 636, 16128, 4,
+ 16216, 280, 3540, 692, 16116, 8,
+ 16228, 232, 3524, 748, 16104, 12,
+ 16240, 188, 3512, 808, 16092, 12,
+ 16252, 148, 3492, 864, 16080, 16,
+ 16264, 104, 3472, 924, 16068, 16,
+ 16276, 64, 3452, 984, 16056, 20,
+ 16284, 28, 3428, 1044, 16048, 24,
+ 16296, 16376, 3400, 1108, 16036, 24,
+ 16304, 16340, 3372, 1168, 16024, 28,
+ 16316, 16304, 3340, 1232, 16016, 32,
+ 16324, 16272, 3308, 1296, 16004, 32,
+ 16332, 16244, 3272, 1360, 15996, 36,
+ 16344, 16212, 3236, 1424, 15988, 36,
+ 16352, 16188, 3200, 1488, 15980, 40,
+ 16360, 16160, 3160, 1552, 15972, 40,
+ 16368, 16136, 3116, 1616, 15964, 40,
+ 16372, 16112, 3072, 1680, 15956, 44,
+ 16380, 16092, 3028, 1744, 15952, 44,
+ 0, 16072, 2980, 1808, 15948, 44,
+ 8, 16052, 2932, 1872, 15944, 48,
+ 12, 16036, 2880, 1936, 15940, 48,
+ 16, 16020, 2828, 2000, 15936, 48,
+ 20, 16008, 2776, 2064, 15936, 48,
+ 24, 15996, 2724, 2128, 15936, 48,
+ 28, 15984, 2668, 2192, 15936, 48,
+ 32, 15972, 2612, 2252, 15940, 44,
+ 36, 15964, 2552, 2316, 15940, 44,
+ 40, 15956, 2496, 2376, 15944, 44,
+ 40, 15952, 2436, 2436, 15952, 40 };
+
+static const uint16_t filter_6tap_64p_150[198] = {
+ 16148, 920, 2724, 920, 16148, 0,
+ 16152, 880, 2724, 956, 16148, 0,
+ 16152, 844, 2720, 996, 16144, 0,
+ 16156, 804, 2716, 1032, 16144, 0,
+ 16156, 768, 2712, 1072, 16144, 0,
+ 16160, 732, 2708, 1112, 16144, 16380,
+ 16164, 696, 2700, 1152, 16144, 16380,
+ 16168, 660, 2692, 1192, 16148, 16380,
+ 16172, 628, 2684, 1232, 16148, 16380,
+ 16176, 592, 2672, 1272, 16152, 16376,
+ 16180, 560, 2660, 1312, 16152, 16376,
+ 16184, 524, 2648, 1348, 16156, 16376,
+ 16192, 492, 2632, 1388, 16160, 16372,
+ 16196, 460, 2616, 1428, 16164, 16372,
+ 16200, 432, 2600, 1468, 16168, 16368,
+ 16204, 400, 2584, 1508, 16176, 16364,
+ 16212, 368, 2564, 1548, 16180, 16364,
+ 16216, 340, 2544, 1588, 16188, 16360,
+ 16220, 312, 2524, 1628, 16196, 16356,
+ 16228, 284, 2504, 1668, 16204, 16356,
+ 16232, 256, 2480, 1704, 16212, 16352,
+ 16240, 232, 2456, 1744, 16224, 16348,
+ 16244, 204, 2432, 1780, 16232, 16344,
+ 16248, 180, 2408, 1820, 16244, 16340,
+ 16256, 156, 2380, 1856, 16256, 16336,
+ 16260, 132, 2352, 1896, 16268, 16332,
+ 16268, 108, 2324, 1932, 16280, 16328,
+ 16272, 88, 2296, 1968, 16292, 16324,
+ 16276, 64, 2268, 2004, 16308, 16320,
+ 16284, 44, 2236, 2036, 16324, 16312,
+ 16288, 24, 2204, 2072, 16340, 16308,
+ 16292, 8, 2172, 2108, 16356, 16304,
+ 16300, 16372, 2140, 2140, 16372, 16300 };
+
+static const uint16_t filter_6tap_64p_183[198] = {
+ 16296, 1032, 2196, 1032, 16296, 0,
+ 16292, 1004, 2200, 1060, 16304, 16380,
+ 16288, 976, 2200, 1088, 16308, 16380,
+ 16284, 952, 2196, 1116, 16312, 16376,
+ 16284, 924, 2196, 1144, 16320, 16376,
+ 16280, 900, 2192, 1172, 16324, 16372,
+ 16276, 872, 2192, 1200, 16332, 16368,
+ 16276, 848, 2188, 1228, 16340, 16368,
+ 16272, 820, 2180, 1256, 16348, 16364,
+ 16272, 796, 2176, 1280, 16356, 16360,
+ 16268, 768, 2168, 1308, 16364, 16360,
+ 16268, 744, 2164, 1336, 16372, 16356,
+ 16268, 716, 2156, 1364, 16380, 16352,
+ 16264, 692, 2148, 1392, 4, 16352,
+ 16264, 668, 2136, 1420, 16, 16348,
+ 16264, 644, 2128, 1448, 28, 16344,
+ 16264, 620, 2116, 1472, 36, 16340,
+ 16264, 596, 2108, 1500, 48, 16340,
+ 16268, 572, 2096, 1524, 60, 16336,
+ 16268, 548, 2080, 1552, 72, 16332,
+ 16268, 524, 2068, 1576, 88, 16328,
+ 16268, 504, 2056, 1604, 100, 16324,
+ 16272, 480, 2040, 1628, 112, 16324,
+ 16272, 456, 2024, 1652, 128, 16320,
+ 16272, 436, 2008, 1680, 144, 16316,
+ 16276, 416, 1992, 1704, 156, 16312,
+ 16276, 392, 1976, 1724, 172, 16308,
+ 16280, 372, 1956, 1748, 188, 16308,
+ 16280, 352, 1940, 1772, 204, 16304,
+ 16284, 332, 1920, 1796, 224, 16300,
+ 16288, 312, 1900, 1816, 240, 16296,
+ 16288, 296, 1880, 1840, 256, 16296,
+ 16292, 276, 1860, 1860, 276, 16292 };
+
+static const uint16_t filter_7tap_64p_upscale[231] = {
+ 176, 15760, 2488, 2488, 15760, 176, 0,
+ 172, 15772, 2404, 2572, 15752, 180, 16380,
+ 168, 15784, 2324, 2656, 15740, 184, 16380,
+ 164, 15800, 2240, 2736, 15732, 188, 16376,
+ 160, 15812, 2152, 2816, 15728, 192, 16376,
+ 152, 15828, 2068, 2896, 15724, 192, 16376,
+ 148, 15848, 1984, 2972, 15720, 196, 16372,
+ 140, 15864, 1896, 3048, 15720, 196, 16372,
+ 136, 15884, 1812, 3124, 15720, 196, 16368,
+ 128, 15900, 1724, 3196, 15720, 196, 16368,
+ 120, 15920, 1640, 3268, 15724, 196, 16368,
+ 116, 15940, 1552, 3336, 15732, 196, 16364,
+ 108, 15964, 1468, 3400, 15740, 196, 16364,
+ 104, 15984, 1384, 3464, 15748, 192, 16364,
+ 96, 16004, 1300, 3524, 15760, 188, 16364,
+ 88, 16028, 1216, 3584, 15776, 184, 16364,
+ 84, 16048, 1132, 3640, 15792, 180, 16360,
+ 76, 16072, 1048, 3692, 15812, 176, 16360,
+ 68, 16092, 968, 3744, 15832, 168, 16360,
+ 64, 16116, 888, 3788, 15856, 160, 16360,
+ 56, 16140, 812, 3832, 15884, 152, 16360,
+ 52, 16160, 732, 3876, 15912, 144, 16360,
+ 44, 16184, 656, 3912, 15944, 136, 16364,
+ 40, 16204, 584, 3944, 15976, 124, 16364,
+ 32, 16228, 512, 3976, 16012, 116, 16364,
+ 28, 16248, 440, 4004, 16048, 104, 16364,
+ 24, 16268, 372, 4028, 16092, 88, 16368,
+ 20, 16288, 304, 4048, 16132, 76, 16368,
+ 12, 16308, 240, 4064, 16180, 60, 16372,
+ 8, 16328, 176, 4076, 16228, 48, 16372,
+ 4, 16348, 112, 4088, 16276, 32, 16376,
+ 0, 16364, 56, 4092, 16328, 16, 16380,
+ 0, 0, 0, 4096, 0, 0, 0 };
+
+static const uint16_t filter_7tap_64p_117[231] = {
+ 92, 15868, 2464, 2464, 15868, 92, 0,
+ 96, 15864, 2404, 2528, 15876, 88, 0,
+ 100, 15860, 2344, 2584, 15884, 84, 0,
+ 104, 15856, 2280, 2644, 15892, 76, 0,
+ 108, 15852, 2216, 2700, 15904, 72, 0,
+ 108, 15852, 2152, 2756, 15916, 64, 0,
+ 112, 15852, 2088, 2812, 15932, 60, 0,
+ 112, 15852, 2024, 2864, 15948, 52, 0,
+ 112, 15856, 1960, 2916, 15964, 44, 0,
+ 116, 15860, 1892, 2964, 15984, 36, 0,
+ 116, 15864, 1828, 3016, 16004, 24, 4,
+ 116, 15868, 1760, 3060, 16024, 16, 4,
+ 116, 15876, 1696, 3108, 16048, 8, 8,
+ 116, 15884, 1628, 3152, 16072, 16380, 8,
+ 112, 15892, 1564, 3192, 16100, 16372, 8,
+ 112, 15900, 1496, 3232, 16124, 16360, 12,
+ 112, 15908, 1428, 3268, 16156, 16348, 12,
+ 108, 15920, 1364, 3304, 16188, 16336, 16,
+ 108, 15928, 1300, 3340, 16220, 16324, 20,
+ 104, 15940, 1232, 3372, 16252, 16312, 20,
+ 104, 15952, 1168, 3400, 16288, 16300, 24,
+ 100, 15964, 1104, 3428, 16328, 16284, 28,
+ 96, 15980, 1040, 3452, 16364, 16272, 28,
+ 96, 15992, 976, 3476, 20, 16256, 32,
+ 92, 16004, 916, 3496, 64, 16244, 36,
+ 88, 16020, 856, 3516, 108, 16228, 40,
+ 84, 16032, 792, 3532, 152, 16216, 44,
+ 80, 16048, 732, 3544, 200, 16200, 48,
+ 80, 16064, 676, 3556, 248, 16184, 48,
+ 76, 16080, 616, 3564, 296, 16168, 52,
+ 72, 16092, 560, 3568, 344, 16156, 56,
+ 68, 16108, 504, 3572, 396, 16140, 60,
+ 64, 16124, 452, 3576, 452, 16124, 64 };
+
+static const uint16_t filter_7tap_64p_150[231] = {
+ 16224, 16380, 2208, 2208, 16380, 16224, 0,
+ 16232, 16360, 2172, 2236, 16, 16216, 0,
+ 16236, 16340, 2140, 2268, 40, 16212, 0,
+ 16244, 16324, 2104, 2296, 60, 16204, 4,
+ 16252, 16304, 2072, 2324, 84, 16196, 4,
+ 16256, 16288, 2036, 2352, 108, 16192, 4,
+ 16264, 16268, 2000, 2380, 132, 16184, 8,
+ 16272, 16252, 1960, 2408, 160, 16176, 8,
+ 16276, 16240, 1924, 2432, 184, 16172, 8,
+ 16284, 16224, 1888, 2456, 212, 16164, 8,
+ 16288, 16212, 1848, 2480, 240, 16160, 12,
+ 16296, 16196, 1812, 2500, 268, 16152, 12,
+ 16300, 16184, 1772, 2524, 296, 16144, 12,
+ 16308, 16172, 1736, 2544, 324, 16140, 12,
+ 16312, 16164, 1696, 2564, 356, 16136, 12,
+ 16320, 16152, 1656, 2584, 388, 16128, 12,
+ 16324, 16144, 1616, 2600, 416, 16124, 12,
+ 16328, 16136, 1576, 2616, 448, 16116, 12,
+ 16332, 16128, 1536, 2632, 480, 16112, 12,
+ 16340, 16120, 1496, 2648, 516, 16108, 12,
+ 16344, 16112, 1456, 2660, 548, 16104, 12,
+ 16348, 16104, 1416, 2672, 580, 16100, 12,
+ 16352, 16100, 1376, 2684, 616, 16096, 12,
+ 16356, 16096, 1336, 2696, 652, 16092, 12,
+ 16360, 16092, 1296, 2704, 688, 16088, 12,
+ 16364, 16088, 1256, 2712, 720, 16084, 12,
+ 16368, 16084, 1220, 2720, 760, 16084, 8,
+ 16368, 16080, 1180, 2724, 796, 16080, 8,
+ 16372, 16080, 1140, 2732, 832, 16080, 8,
+ 16376, 16076, 1100, 2732, 868, 16076, 4,
+ 16380, 16076, 1060, 2736, 908, 16076, 4,
+ 16380, 16076, 1020, 2740, 944, 16076, 0,
+ 0, 16076, 984, 2740, 984, 16076, 0 };
+
+static const uint16_t filter_7tap_64p_183[231] = {
+ 16216, 324, 1884, 1884, 324, 16216, 0,
+ 16220, 304, 1864, 1904, 344, 16216, 0,
+ 16224, 284, 1844, 1924, 364, 16216, 0,
+ 16224, 264, 1824, 1944, 384, 16212, 16380,
+ 16228, 248, 1804, 1960, 408, 16212, 16380,
+ 16228, 228, 1784, 1976, 428, 16208, 16380,
+ 16232, 212, 1760, 1996, 452, 16208, 16380,
+ 16236, 192, 1740, 2012, 472, 16208, 16376,
+ 16240, 176, 1716, 2028, 496, 16208, 16376,
+ 16240, 160, 1696, 2040, 516, 16208, 16376,
+ 16244, 144, 1672, 2056, 540, 16208, 16376,
+ 16248, 128, 1648, 2068, 564, 16208, 16372,
+ 16252, 112, 1624, 2084, 588, 16208, 16372,
+ 16256, 96, 1600, 2096, 612, 16208, 16368,
+ 16256, 84, 1576, 2108, 636, 16208, 16368,
+ 16260, 68, 1552, 2120, 660, 16208, 16368,
+ 16264, 56, 1524, 2132, 684, 16212, 16364,
+ 16268, 40, 1500, 2140, 712, 16212, 16364,
+ 16272, 28, 1476, 2152, 736, 16216, 16360,
+ 16276, 16, 1448, 2160, 760, 16216, 16356,
+ 16280, 4, 1424, 2168, 788, 16220, 16356,
+ 16284, 16376, 1396, 2176, 812, 16224, 16352,
+ 16288, 16368, 1372, 2184, 840, 16224, 16352,
+ 16292, 16356, 1344, 2188, 864, 16228, 16348,
+ 16292, 16344, 1320, 2196, 892, 16232, 16344,
+ 16296, 16336, 1292, 2200, 916, 16236, 16344,
+ 16300, 16324, 1264, 2204, 944, 16240, 16340,
+ 16304, 16316, 1240, 2208, 972, 16248, 16336,
+ 16308, 16308, 1212, 2212, 996, 16252, 16332,
+ 16312, 16300, 1184, 2216, 1024, 16256, 16332,
+ 16316, 16292, 1160, 2216, 1052, 16264, 16328,
+ 16316, 16284, 1132, 2216, 1076, 16268, 16324,
+ 16320, 16276, 1104, 2216, 1104, 16276, 16320 };
+
+static const uint16_t filter_8tap_64p_upscale[264] = {
+ 0, 0, 0, 4096, 0, 0, 0, 0,
+ 16376, 20, 16328, 4092, 56, 16364, 4, 0,
+ 16372, 36, 16272, 4088, 116, 16340, 12, 0,
+ 16364, 56, 16220, 4080, 180, 16320, 20, 0,
+ 16360, 76, 16172, 4064, 244, 16296, 24, 16380,
+ 16356, 92, 16124, 4048, 312, 16276, 32, 16380,
+ 16352, 108, 16080, 4032, 380, 16252, 40, 16380,
+ 16344, 124, 16036, 4008, 452, 16228, 48, 16380,
+ 16340, 136, 15996, 3980, 524, 16204, 56, 16380,
+ 16340, 152, 15956, 3952, 600, 16180, 64, 16376,
+ 16336, 164, 15920, 3920, 672, 16156, 76, 16376,
+ 16332, 176, 15888, 3884, 752, 16132, 84, 16376,
+ 16328, 188, 15860, 3844, 828, 16104, 92, 16372,
+ 16328, 200, 15828, 3800, 908, 16080, 100, 16372,
+ 16324, 208, 15804, 3756, 992, 16056, 108, 16372,
+ 16324, 216, 15780, 3708, 1072, 16032, 120, 16368,
+ 16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
+ 16320, 232, 15740, 3604, 1240, 15984, 136, 16364,
+ 16320, 240, 15724, 3548, 1324, 15960, 144, 16364,
+ 16320, 244, 15708, 3488, 1412, 15936, 152, 16360,
+ 16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
+ 16320, 252, 15688, 3364, 1584, 15892, 172, 16356,
+ 16320, 256, 15680, 3296, 1672, 15868, 180, 16352,
+ 16320, 256, 15672, 3228, 1756, 15848, 188, 16352,
+ 16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
+ 16320, 260, 15668, 3084, 1932, 15808, 200, 16348,
+ 16320, 256, 15668, 3012, 2020, 15792, 208, 16344,
+ 16324, 256, 15668, 2936, 2108, 15772, 216, 16344,
+ 16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
+ 16324, 252, 15676, 2776, 2280, 15740, 228, 16336,
+ 16328, 252, 15684, 2696, 2364, 15728, 232, 16336,
+ 16328, 248, 15692, 2616, 2448, 15716, 240, 16332,
+ 16332, 244, 15704, 2532, 2532, 15704, 244, 16332 };
+
+static const uint16_t filter_8tap_64p_117[264] = {
+ 116, 16100, 428, 3564, 428, 16100, 116, 0,
+ 112, 16116, 376, 3564, 484, 16084, 120, 16380,
+ 104, 16136, 324, 3560, 540, 16064, 124, 16380,
+ 100, 16152, 272, 3556, 600, 16048, 128, 16380,
+ 96, 16168, 220, 3548, 656, 16032, 136, 16376,
+ 88, 16188, 172, 3540, 716, 16016, 140, 16376,
+ 84, 16204, 124, 3528, 780, 16000, 144, 16376,
+ 80, 16220, 76, 3512, 840, 15984, 148, 16372,
+ 76, 16236, 32, 3496, 904, 15968, 152, 16372,
+ 68, 16252, 16376, 3480, 968, 15952, 156, 16372,
+ 64, 16268, 16332, 3456, 1032, 15936, 160, 16372,
+ 60, 16284, 16292, 3432, 1096, 15920, 164, 16368,
+ 56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
+ 48, 16316, 16216, 3380, 1228, 15892, 168, 16368,
+ 44, 16332, 16180, 3348, 1296, 15880, 168, 16368,
+ 40, 16348, 16148, 3316, 1364, 15868, 172, 16364,
+ 36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
+ 32, 16376, 16084, 3248, 1496, 15848, 176, 16364,
+ 28, 4, 16052, 3208, 1564, 15836, 176, 16364,
+ 24, 16, 16028, 3168, 1632, 15828, 176, 16364,
+ 20, 28, 16000, 3124, 1700, 15820, 176, 16364,
+ 16, 40, 15976, 3080, 1768, 15812, 176, 16364,
+ 12, 52, 15952, 3036, 1836, 15808, 176, 16364,
+ 8, 64, 15932, 2988, 1904, 15800, 176, 16364,
+ 4, 76, 15912, 2940, 1972, 15800, 172, 16364,
+ 4, 84, 15892, 2888, 2040, 15796, 172, 16364,
+ 0, 96, 15876, 2836, 2104, 15792, 168, 16364,
+ 16380, 104, 15864, 2780, 2172, 15792, 164, 16364,
+ 16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
+ 16376, 120, 15836, 2668, 2300, 15796, 156, 16368,
+ 16376, 128, 15828, 2608, 2364, 15800, 152, 16368,
+ 16372, 136, 15816, 2548, 2428, 15804, 148, 16368,
+ 16372, 140, 15812, 2488, 2488, 15812, 140, 16372 };
+
+static const uint16_t filter_8tap_64p_150[264] = {
+ 16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
+ 0, 16020, 992, 2756, 1068, 16024, 16376, 0,
+ 4, 16020, 952, 2752, 1108, 16024, 16372, 0,
+ 8, 16020, 916, 2748, 1148, 16028, 16368, 0,
+ 12, 16020, 876, 2744, 1184, 16032, 16364, 4,
+ 16, 16020, 840, 2740, 1224, 16036, 16356, 4,
+ 20, 16024, 800, 2732, 1264, 16040, 16352, 4,
+ 20, 16024, 764, 2724, 1304, 16044, 16348, 8,
+ 24, 16028, 728, 2716, 1344, 16052, 16340, 8,
+ 28, 16028, 692, 2704, 1380, 16056, 16336, 12,
+ 28, 16032, 656, 2696, 1420, 16064, 16328, 12,
+ 32, 16036, 620, 2684, 1460, 16072, 16324, 12,
+ 36, 16040, 584, 2668, 1500, 16080, 16316, 16,
+ 36, 16044, 548, 2656, 1536, 16088, 16308, 16,
+ 36, 16048, 516, 2640, 1576, 16096, 16304, 20,
+ 40, 16052, 480, 2624, 1612, 16108, 16296, 20,
+ 40, 16060, 448, 2608, 1652, 16120, 16288, 20,
+ 44, 16064, 416, 2588, 1692, 16132, 16280, 24,
+ 44, 16068, 384, 2568, 1728, 16144, 16276, 24,
+ 44, 16076, 352, 2548, 1764, 16156, 16268, 28,
+ 44, 16080, 320, 2528, 1804, 16168, 16260, 28,
+ 44, 16088, 292, 2508, 1840, 16184, 16252, 28,
+ 44, 16096, 264, 2484, 1876, 16200, 16244, 32,
+ 48, 16100, 232, 2460, 1912, 16216, 16236, 32,
+ 48, 16108, 204, 2436, 1948, 16232, 16228, 32,
+ 48, 16116, 176, 2412, 1980, 16248, 16220, 36,
+ 48, 16124, 152, 2384, 2016, 16264, 16216, 36,
+ 44, 16128, 124, 2356, 2052, 16284, 16208, 36,
+ 44, 16136, 100, 2328, 2084, 16304, 16200, 40,
+ 44, 16144, 72, 2300, 2116, 16324, 16192, 40,
+ 44, 16152, 48, 2272, 2148, 16344, 16184, 40,
+ 44, 16160, 24, 2244, 2180, 16364, 16176, 40,
+ 44, 16168, 4, 2212, 2212, 4, 16168, 44 };
+
+static const uint16_t filter_8tap_64p_183[264] = {
+ 16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
+ 16268, 16256, 1136, 2240, 1188, 16272, 16260, 0,
+ 16272, 16248, 1108, 2240, 1216, 16280, 16256, 0,
+ 16276, 16240, 1080, 2236, 1240, 16292, 16252, 0,
+ 16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
+ 16284, 16224, 1028, 2232, 1292, 16312, 16244, 0,
+ 16288, 16216, 1000, 2228, 1320, 16324, 16240, 0,
+ 16292, 16212, 976, 2224, 1344, 16336, 16236, 0,
+ 16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
+ 16300, 16200, 920, 2212, 1396, 16360, 16228, 4,
+ 16304, 16196, 896, 2204, 1424, 16372, 16224, 4,
+ 16308, 16188, 868, 2200, 1448, 0, 16220, 4,
+ 16312, 16184, 844, 2192, 1472, 12, 16216, 4,
+ 16316, 16180, 816, 2184, 1500, 28, 16212, 4,
+ 16320, 16176, 792, 2172, 1524, 40, 16208, 4,
+ 16324, 16172, 764, 2164, 1548, 56, 16204, 0,
+ 16328, 16172, 740, 2156, 1572, 72, 16200, 0,
+ 16328, 16168, 712, 2144, 1596, 88, 16196, 0,
+ 16332, 16164, 688, 2132, 1620, 100, 16192, 0,
+ 16336, 16164, 664, 2120, 1644, 120, 16192, 0,
+ 16340, 16160, 640, 2108, 1668, 136, 16188, 0,
+ 16344, 16160, 616, 2096, 1688, 152, 16184, 0,
+ 16344, 16160, 592, 2080, 1712, 168, 16180, 0,
+ 16348, 16156, 568, 2068, 1736, 188, 16176, 16380,
+ 16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
+ 16352, 16156, 520, 2036, 1780, 224, 16172, 16380,
+ 16356, 16156, 496, 2024, 1800, 244, 16172, 16380,
+ 16360, 16156, 472, 2008, 1820, 260, 16168, 16376,
+ 16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
+ 16364, 16156, 428, 1972, 1860, 300, 16164, 16376,
+ 16364, 16156, 408, 1956, 1880, 320, 16164, 16372,
+ 16368, 16160, 384, 1936, 1900, 344, 16160, 16372,
+ 16368, 16160, 364, 1920, 1920, 364, 16160, 16368 };
+
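+/* Coefficient format: each tap appears to be a 14-bit two's-complement value
+ * scaled so that 4096 represents 1.0 (e.g. 16376 encodes -8), and each phase
+ * row sums to roughly 4096 for unity gain. The _117/_150/_183 suffixes appear
+ * to denote the scaling ratio (~1.17, ~1.50, ~1.83) each set targets, and the
+ * selectors below bracket the requested ratio at 1, 4/3 and 5/3.
+ */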
+const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_3tap_16p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_3tap_16p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_3tap_16p_150;
+ else
+ return filter_3tap_16p_183;
+}
+
+const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_3tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_3tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_3tap_64p_150;
+ else
+ return filter_3tap_64p_183;
+}
+
+const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_4tap_16p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_4tap_16p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_4tap_16p_150;
+ else
+ return filter_4tap_16p_183;
+}
+
+const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_4tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_4tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_4tap_64p_150;
+ else
+ return filter_4tap_64p_183;
+}
+
+const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_5tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_5tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_5tap_64p_150;
+ else
+ return filter_5tap_64p_183;
+}
+
+const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_6tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_6tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_6tap_64p_150;
+ else
+ return filter_6tap_64p_183;
+}
+
+const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_7tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_7tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_7tap_64p_150;
+ else
+ return filter_7tap_64p_183;
+}
+
+const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
+{
+ if (ratio.value < dal_fixed31_32_one.value)
+ return filter_8tap_64p_upscale;
+ else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value)
+ return filter_8tap_64p_117;
+ else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value)
+ return filter_8tap_64p_150;
+ else
+ return filter_8tap_64p_183;
+}
+
+const uint16_t *get_filter_2tap_16p(void)
+{
+ return filter_2tap_16p;
+}
+
+const uint16_t *get_filter_2tap_64p(void)
+{
+ return filter_2tap_64p;
+}
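+
+/* Illustrative use (assumed caller, not part of this file): a scaler derives
+ * the ratio from the source and destination sizes and fetches the matching
+ * coefficient set, e.g.
+ *
+ *   struct fixed31_32 ratio = dal_fixed31_32_from_fraction(src_w, dst_w);
+ *   const uint16_t *coeffs = get_filter_4tap_64p(ratio);
+ */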
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
new file mode 100644
index 000000000000..e42b6eb1c1f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -0,0 +1,1620 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dce_stream_encoder.h"
+#include "reg_helper.h"
+
+enum DP_PIXEL_ENCODING {
+ DP_PIXEL_ENCODING_RGB444 = 0x00000000,
+ DP_PIXEL_ENCODING_YCBCR422 = 0x00000001,
+ DP_PIXEL_ENCODING_YCBCR444 = 0x00000002,
+ DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x00000003,
+ DP_PIXEL_ENCODING_Y_ONLY = 0x00000004,
+ DP_PIXEL_ENCODING_YCBCR420 = 0x00000005,
+ DP_PIXEL_ENCODING_RESERVED = 0x00000006,
+};
+
+
+enum DP_COMPONENT_DEPTH {
+ DP_COMPONENT_DEPTH_6BPC = 0x00000000,
+ DP_COMPONENT_DEPTH_8BPC = 0x00000001,
+ DP_COMPONENT_DEPTH_10BPC = 0x00000002,
+ DP_COMPONENT_DEPTH_12BPC = 0x00000003,
+ DP_COMPONENT_DEPTH_16BPC = 0x00000004,
+ DP_COMPONENT_DEPTH_RESERVED = 0x00000005,
+};
+
+
+#define REG(reg)\
+ (enc110->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc110->se_shift->field_name, enc110->se_mask->field_name
+
+#define VBI_LINE_0 0
+#define DP_BLANK_MAX_RETRY 20
+#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000
+
+#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
+#endif
+
+enum {
+ DP_MST_UPDATE_MAX_RETRY = 50
+};
+
+#define DCE110_SE(audio)\
+ container_of(audio, struct dce110_stream_encoder, base)
+
+#define CTX \
+ enc110->base.ctx
+
+static void dce110_update_generic_info_packet(
+ struct dce110_stream_encoder *enc110,
+ uint32_t packet_index,
+ const struct encoder_info_packet *info_packet)
+{
+ uint32_t regval;
+ /* TODOFPGA: figure out a proper number of max_retries for polling the
+  * lock; use 50 for now.
+  */
+ uint32_t max_retries = 50;
+
+ /* we need to turn on the clock before programming the AFMT block */
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
+ if (REG(AFMT_VBI_PACKET_CONTROL1)) {
+ if (packet_index >= 8)
+ ASSERT(0);
+
+ /* poll until dig_update_lock is not locked -> ASIC internal signal;
+  * assume the OTG master lock will unlock it
+  */
+/* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS,
+ 0, 10, max_retries);*/
+
+ /* check if HW is still reading GSP memory */
+ REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT,
+ 0, 10, max_retries);
+
+ /* If HW does not stop reading GSP memory for too long, something is
+  * wrong: clear the GSP memory access conflict to notify HW that SW is
+  * writing to GSP memory.
+  */
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);
+ }
+ /* choose which generic packet to use */
+ {
+ regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
+ AFMT_GENERIC_INDEX, packet_index);
+ }
+
+ /* write generic packet header
+ * (4th byte is for GENERIC0 only) */
+ {
+ REG_SET_4(AFMT_GENERIC_HDR, 0,
+ AFMT_GENERIC_HB0, info_packet->hb0,
+ AFMT_GENERIC_HB1, info_packet->hb1,
+ AFMT_GENERIC_HB2, info_packet->hb2,
+ AFMT_GENERIC_HB3, info_packet->hb3);
+ }
+
+ /* write generic packet contents
+ * (we never use last 4 bytes)
+ * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers */
+ {
+ const uint32_t *content =
+ (const uint32_t *) &info_packet->sb[0];
+
+ REG_WRITE(AFMT_GENERIC_0, *content++);
+ REG_WRITE(AFMT_GENERIC_1, *content++);
+ REG_WRITE(AFMT_GENERIC_2, *content++);
+ REG_WRITE(AFMT_GENERIC_3, *content++);
+ REG_WRITE(AFMT_GENERIC_4, *content++);
+ REG_WRITE(AFMT_GENERIC_5, *content++);
+ REG_WRITE(AFMT_GENERIC_6, *content++);
+ REG_WRITE(AFMT_GENERIC_7, *content);
+ }
+
+ if (!REG(AFMT_VBI_PACKET_CONTROL1)) {
+ /* force double-buffered packet update */
+ REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL,
+ AFMT_GENERIC0_UPDATE, (packet_index == 0),
+ AFMT_GENERIC2_UPDATE, (packet_index == 2));
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(AFMT_VBI_PACKET_CONTROL1)) {
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC0_FRAME_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC1_FRAME_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC2_FRAME_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC3_FRAME_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC4_FRAME_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC5_FRAME_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC6_FRAME_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC7_FRAME_UPDATE, 1);
+ break;
+ default:
+ break;
+ }
+ }
+#endif
+}
+
+static void dce110_update_hdmi_info_packet(
+ struct dce110_stream_encoder *enc110,
+ uint32_t packet_index,
+ const struct encoder_info_packet *info_packet)
+{
+ struct dc_context *ctx = enc110->base.ctx;
+ uint32_t cont, send, line;
+
+ if (info_packet->valid) {
+ dce110_update_generic_info_packet(
+ enc110,
+ packet_index,
+ info_packet);
+
+ /* enable transmission of packet(s) -
+ * packet transmission begins on the next frame */
+ cont = 1;
+ /* send packet(s) every frame */
+ send = 1;
+ /* select line number to send packets on */
+ line = 2;
+ } else {
+ cont = 0;
+ send = 0;
+ line = 0;
+ }
+
+ /* choose which generic packet control to use */
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 1:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 2:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 3:
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case 4:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 5:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+ case 6:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL3))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC0_CONT, cont,
+ HDMI_GENERIC0_SEND, send,
+ HDMI_GENERIC0_LINE, line);
+ break;
+ case 7:
+ if (REG(HDMI_GENERIC_PACKET_CONTROL3))
+ REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3,
+ HDMI_GENERIC1_CONT, cont,
+ HDMI_GENERIC1_SEND, send,
+ HDMI_GENERIC1_LINE, line);
+ break;
+#endif
+ default:
+ /* invalid HW packet index */
+ dm_logger_write(
+ ctx->logger, LOG_WARNING,
+ "Invalid HW packet index: %s()\n",
+ __func__);
+ return;
+ }
+}
+
+/* setup stream encoder in dp mode */
+static void dce110_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ uint32_t h_active_start;
+ uint32_t v_active_start;
+ uint32_t misc0 = 0;
+ uint32_t misc1 = 0;
+ uint32_t h_blank;
+ uint32_t h_back_porch;
+ uint8_t synchronous_clock = 0; /* asynchronous mode */
+ uint8_t colorimetry_bpc;
+#endif
+
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(DP_DB_CNTL))
+ REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1);
+#endif
+
+ /* set pixel encoding */
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_YCBCR422);
+ break;
+ case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_YCBCR444);
+
+ if (crtc_timing->flags.Y_ONLY)
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_666)
+ /* HW testing only, no use case yet.
+ * Color depth of Y-only could be
+ * 8, 10, 12, 16 bits */
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_Y_ONLY);
+ /* Note: DP_MSA_MISC1 bit 7 is the indicator
+ * of Y-only mode.
+ * This bit is set in HW if register
+ * DP_PIXEL_ENCODING is programmed to 0x4 */
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_YCBCR420);
+ if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (enc110->se_mask->DP_VID_N_MUL)
+ REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
+#endif
+ break;
+ default:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING,
+ DP_PIXEL_ENCODING_RGB444);
+ break;
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(DP_MSA_MISC))
+ misc1 = REG_READ(DP_MSA_MISC);
+#endif
+
+ /* set color depth */
+
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ 0);
+ break;
+ case COLOR_DEPTH_888:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_8BPC);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_10BPC);
+
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_12BPC);
+ break;
+ default:
+ REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH,
+ DP_COMPONENT_DEPTH_6BPC);
+ break;
+ }
+
+ /* set dynamic range and YCbCr range */
+ if (enc110->se_mask->DP_DYN_RANGE && enc110->se_mask->DP_YCBCR_RANGE)
+ REG_UPDATE_2(
+ DP_PIXEL_FORMAT,
+ DP_DYN_RANGE, 0,
+ DP_YCBCR_RANGE, 0);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ colorimetry_bpc = 0;
+ break;
+ case COLOR_DEPTH_888:
+ colorimetry_bpc = 1;
+ break;
+ case COLOR_DEPTH_101010:
+ colorimetry_bpc = 2;
+ break;
+ case COLOR_DEPTH_121212:
+ colorimetry_bpc = 3;
+ break;
+ default:
+ colorimetry_bpc = 0;
+ break;
+ }
+
+ misc0 = misc0 | synchronous_clock;
+ misc0 = colorimetry_bpc << 5;
+
+ if (REG(DP_MSA_TIMING_PARAM1)) {
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ misc0 = misc0 | 0x0;
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_YCBCR601:
+ misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_YCBCR709:
+ misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_ADOBERGB:
+ case COLOR_SPACE_UNKNOWN:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* do nothing */
+ break;
+ }
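+
+ /* Worked example (illustrative): YCbCr 4:4:4, BT.709, 8 bpc gives
+  * misc0 = (1 << 5) | 0x18 | 0x4 = 0x3C and misc1 bit7 = 0.
+  */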
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (REG(DP_MSA_COLORIMETRY))
+ REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
+
+ if (REG(DP_MSA_MISC))
+ REG_WRITE(DP_MSA_MISC, misc1); /* MSA_MISC1 */
+
+ /* DCN-only registers;
+  * dc_crtc_timing is a VESA DMT struct with data taken from the EDID
+  */
+ if (REG(DP_MSA_TIMING_PARAM1))
+ REG_SET_2(DP_MSA_TIMING_PARAM1, 0,
+ DP_MSA_HTOTAL, crtc_timing->h_total,
+ DP_MSA_VTOTAL, crtc_timing->v_total);
+#endif
+
+ /* calculate from VESA timing parameters;
+  * h_active_start is relative to the leading edge of sync
+  */
+
+ h_blank = crtc_timing->h_total - crtc_timing->h_border_left -
+ crtc_timing->h_addressable - crtc_timing->h_border_right;
+
+ h_back_porch = h_blank - crtc_timing->h_front_porch -
+ crtc_timing->h_sync_width;
+
+ /* start at beginning of left border */
+ h_active_start = crtc_timing->h_sync_width + h_back_porch;
+
+
+ v_active_start = crtc_timing->v_total - crtc_timing->v_border_top -
+ crtc_timing->v_addressable - crtc_timing->v_border_bottom -
+ crtc_timing->v_front_porch;
+
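+ /* Example (illustrative): for CEA 1080p60 (h_total 2200, h_sync 44,
+  * h_front_porch 88, v_total 1125, v_front_porch 4, no borders) this gives
+  * h_active_start = 44 + 148 = 192 and v_active_start = 1125 - 1080 - 4 = 41.
+  */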
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* start at beginning of left border */
+ if (REG(DP_MSA_TIMING_PARAM2))
+ REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
+ DP_MSA_HSTART, h_active_start,
+ DP_MSA_VSTART, v_active_start);
+
+ if (REG(DP_MSA_TIMING_PARAM3))
+ REG_SET_4(DP_MSA_TIMING_PARAM3, 0,
+ DP_MSA_HSYNCWIDTH,
+ crtc_timing->h_sync_width,
+ DP_MSA_HSYNCPOLARITY,
+ !crtc_timing->flags.HSYNC_POSITIVE_POLARITY,
+ DP_MSA_VSYNCWIDTH,
+ crtc_timing->v_sync_width,
+ DP_MSA_VSYNCPOLARITY,
+ !crtc_timing->flags.VSYNC_POSITIVE_POLARITY);
+
+ /* HWIDTH includes border or overscan */
+ if (REG(DP_MSA_TIMING_PARAM4))
+ REG_SET_2(DP_MSA_TIMING_PARAM4, 0,
+ DP_MSA_HWIDTH, crtc_timing->h_border_left +
+ crtc_timing->h_addressable + crtc_timing->h_border_right,
+ DP_MSA_VHEIGHT, crtc_timing->v_border_top +
+ crtc_timing->v_addressable + crtc_timing->v_border_bottom);
+#endif
+ }
+#endif
+}
+
+static void dce110_stream_encoder_set_stream_attribute_helper(
+ struct dce110_stream_encoder *enc110,
+ struct dc_crtc_timing *crtc_timing)
+{
+ if (enc110->regs->TMDS_CNTL) {
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 1);
+ break;
+ default:
+ REG_UPDATE(TMDS_CNTL, TMDS_PIXEL_ENCODING, 0);
+ break;
+ }
+ REG_UPDATE(TMDS_CNTL, TMDS_COLOR_FORMAT, 0);
+ } else if (enc110->regs->DIG_FE_CNTL) {
+ switch (crtc_timing->pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1);
+ break;
+ default:
+ REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0);
+ break;
+ }
+ REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0);
+ }
+
+}
+
+/* setup stream encoder in hdmi mode */
+static void dce110_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc110->base.id;
+ cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ cntl.enable_dp_audio = enable_audio;
+ cntl.pixel_clock = actual_pix_clk_khz;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+
+ if (enc110->base.bp->funcs->encoder_control(
+ enc110->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
+
+ /* setup HDMI engine */
+ if (!enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
+ REG_UPDATE_3(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else if (enc110->regs->DIG_FE_CNTL) {
+ REG_UPDATE_5(HDMI_CONTROL,
+ HDMI_PACKET_GEN_VERSION, 1,
+ HDMI_KEEPOUT_MODE, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0,
+ HDMI_DATA_SCRAMBLE_EN, 0,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ }
+
+ switch (crtc_timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
+ break;
+ case COLOR_DEPTH_101010:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 1,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_121212:
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 0);
+ } else {
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 2,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ }
+ break;
+ case COLOR_DEPTH_161616:
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DEEP_COLOR_DEPTH, 3,
+ HDMI_DEEP_COLOR_ENABLE, 1);
+ break;
+ default:
+ break;
+ }
+
+ if (enc110->se_mask->HDMI_DATA_SCRAMBLE_EN) {
+ if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) {
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_RATE_MORE_340M
+ * Clock channel frequency is 1/4 of character rate.
+ */
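+ /* e.g. 4K60 RGB 8 bpc runs at a 594 MHz character rate, above the
+  * 340 MHz threshold, so scrambling and the 1/4-rate clock are used.
+  */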
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 1);
+ } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) {
+
+ /* TODO: New feature for DCE11, still need to implement */
+
+ /* enable HDMI data scrambler
+ * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE
+ * Clock channel frequency is the same
+ * as character rate
+ */
+ REG_UPDATE_2(HDMI_CONTROL,
+ HDMI_DATA_SCRAMBLE_EN, 1,
+ HDMI_CLOCK_CHANNEL_RATE, 0);
+ }
+ }
+
+ REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL,
+ HDMI_GC_CONT, 1,
+ HDMI_GC_SEND, 1,
+ HDMI_NULL_SEND, 1);
+
+ /* following belongs to audio */
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE,
+ VBI_LINE_0 + 2);
+
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0);
+
+}
+
+/* setup stream encoder in dvi mode */
+static void dce110_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ struct bp_encoder_control cntl = {0};
+
+ cntl.action = ENCODER_CONTROL_SETUP;
+ cntl.engine_id = enc110->base.id;
+ cntl.signal = is_dual_link ?
+ SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK;
+ cntl.enable_dp_audio = false;
+ cntl.pixel_clock = crtc_timing->pix_clk_khz;
+ cntl.lanes_number = (is_dual_link) ? LANE_COUNT_EIGHT : LANE_COUNT_FOUR;
+
+ if (enc110->base.bp->funcs->encoder_control(
+ enc110->base.bp, &cntl) != BP_RESULT_OK)
+ return;
+
+ ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB);
+ ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888);
+ dce110_stream_encoder_set_stream_attribute_helper(enc110, crtc_timing);
+}
+
+static void dce110_stream_encoder_set_mst_bandwidth(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t x = dal_fixed31_32_floor(
+ avg_time_slots_per_mtp);
+ uint32_t y = dal_fixed31_32_ceil(
+ dal_fixed31_32_shl(
+ dal_fixed31_32_sub_int(
+ avg_time_slots_per_mtp,
+ x),
+ 26));
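+ /* X.Y fixed point: x is the integer part and y the fractional part in
+  * 1/2^26 units; e.g. 3.5 time slots per MTP gives x = 3, y = 0x2000000.
+  */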
+
+ {
+ REG_SET_2(DP_MSE_RATE_CNTL, 0,
+ DP_MSE_RATE_X, x,
+ DP_MSE_RATE_Y, y);
+ }
+
+ /* wait for update to be completed on the link */
+ /* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */
+ /* is reset to 0 (not pending) */
+ REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING,
+ 0,
+ 10, DP_MST_UPDATE_MAX_RETRY);
+}
+
+static void dce110_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
+ enc110->se_mask->HDMI_AVI_INFO_SEND) {
+
+ if (info_frame->avi.valid) {
+ const uint32_t *content =
+ (const uint32_t *) &info_frame->avi.sb[0];
+
+ REG_WRITE(AFMT_AVI_INFO0, content[0]);
+
+ REG_WRITE(AFMT_AVI_INFO1, content[1]);
+
+ REG_WRITE(AFMT_AVI_INFO2, content[2]);
+
+ REG_WRITE(AFMT_AVI_INFO3, content[3]);
+
+ REG_UPDATE(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION,
+ info_frame->avi.hb1);
+
+ REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
+ HDMI_AVI_INFO_SEND, 1,
+ HDMI_AVI_INFO_CONT, 1);
+
+ REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE,
+ VBI_LINE_0 + 2);
+
+ } else {
+ REG_UPDATE_2(HDMI_INFOFRAME_CONTROL0,
+ HDMI_AVI_INFO_SEND, 0,
+ HDMI_AVI_INFO_CONT, 0);
+ }
+ }
+
+ if (enc110->se_mask->HDMI_AVI_INFO_CONT &&
+ enc110->se_mask->HDMI_AVI_INFO_SEND) {
+ dce110_update_hdmi_info_packet(enc110, 0, &info_frame->vendor);
+ dce110_update_hdmi_info_packet(enc110, 1, &info_frame->gamut);
+ dce110_update_hdmi_info_packet(enc110, 2, &info_frame->spd);
+ dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (enc110->se_mask->HDMI_DB_DISABLE) {
+ /* for bring-up, disable double buffering (TODO) */
+ if (REG(HDMI_DB_CONTROL))
+ REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1);
+
+ dce110_update_hdmi_info_packet(enc110, 0, &info_frame->avi);
+ dce110_update_hdmi_info_packet(enc110, 1, &info_frame->vendor);
+ dce110_update_hdmi_info_packet(enc110, 2, &info_frame->gamut);
+ dce110_update_hdmi_info_packet(enc110, 3, &info_frame->spd);
+ dce110_update_hdmi_info_packet(enc110, 4, &info_frame->hdrsmd);
+ }
+#endif
+}
+
+static void dce110_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ /* stop generic packets 0 & 1 on HDMI */
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0);
+
+ /* stop generic packets 2 & 3 on HDMI */
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+	/* stop the remaining generic packets on HDMI (DCN-only registers) */
+ if (REG(HDMI_GENERIC_PACKET_CONTROL2))
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+
+ if (REG(HDMI_GENERIC_PACKET_CONTROL3))
+ REG_SET_6(HDMI_GENERIC_PACKET_CONTROL3, 0,
+ HDMI_GENERIC0_CONT, 0,
+ HDMI_GENERIC0_LINE, 0,
+ HDMI_GENERIC0_SEND, 0,
+ HDMI_GENERIC1_CONT, 0,
+ HDMI_GENERIC1_LINE, 0,
+ HDMI_GENERIC1_SEND, 0);
+#endif
+}
+
+static void dce110_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t value = REG_READ(DP_SEC_CNTL);
+
+ if (info_frame->vsc.valid)
+ dce110_update_generic_info_packet(
+ enc110,
+ 0, /* packetIndex */
+ &info_frame->vsc);
+
+ if (info_frame->spd.valid)
+ dce110_update_generic_info_packet(
+ enc110,
+ 2, /* packetIndex */
+ &info_frame->spd);
+
+ if (info_frame->hdrsmd.valid)
+ dce110_update_generic_info_packet(
+ enc110,
+ 3, /* packetIndex */
+ &info_frame->hdrsmd);
+
+ /* enable/disable transmission of packet(s).
+ * If enabled, packet transmission begins on the next frame
+ */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+
+	/* This bit is the master enable bit.
+	 * When enabling a secondary stream engine,
+	 * this master bit must also be set.
+	 * This register is shared with the audio info frame.
+	 * Therefore we need to set the master enable bit
+	 * if at least one of the fields is not 0.
+	 */
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void dce110_stream_encoder_stop_dp_info_packets(
+ struct stream_encoder *enc)
+{
+ /* stop generic packets on DP */
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t value = REG_READ(DP_SEC_CNTL);
+
+ if (enc110->se_mask->DP_SEC_AVI_ENABLE) {
+ REG_SET_7(DP_SEC_CNTL, 0,
+ DP_SEC_GSP0_ENABLE, 0,
+ DP_SEC_GSP1_ENABLE, 0,
+ DP_SEC_GSP2_ENABLE, 0,
+ DP_SEC_GSP3_ENABLE, 0,
+ DP_SEC_AVI_ENABLE, 0,
+ DP_SEC_MPG_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (enc110->se_mask->DP_SEC_GSP7_ENABLE) {
+ REG_SET_10(DP_SEC_CNTL, 0,
+ DP_SEC_GSP0_ENABLE, 0,
+ DP_SEC_GSP1_ENABLE, 0,
+ DP_SEC_GSP2_ENABLE, 0,
+ DP_SEC_GSP3_ENABLE, 0,
+ DP_SEC_GSP4_ENABLE, 0,
+ DP_SEC_GSP5_ENABLE, 0,
+ DP_SEC_GSP6_ENABLE, 0,
+ DP_SEC_GSP7_ENABLE, 0,
+ DP_SEC_MPG_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+ }
+#endif
+	/* This register is shared with the audio info frame.
+	 * Therefore we need to keep the master enable bit set
+	 * if at least one of the fields is not 0. */
+
+ if (value)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+static void dce110_stream_encoder_dp_blank(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t retries = 0;
+ uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
+
+ /* Note: For CZ, we are changing driver default to disable
+ * stream deferred to next VBLANK. If results are positive, we
+ * will make the same change to all DCE versions. There are a
+ * handful of panels that cannot handle disable stream at
+ * HBLANK and will result in a white line flash across the
+ * screen on stream disable. */
+
+ /* Specify the video stream disable point
+ * (2 = start of the next vertical blank) */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
+ /* Larger delay to wait until VBLANK - use max retry of
+ * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * a little more because we may not trust delay accuracy.
+ */
+ max_retries = DP_BLANK_MAX_RETRY * 150;
+
+ /* disable DP stream */
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+
+ /* the encoder stops sending the video stream
+ * at the start of the vertical blanking.
+ * Poll for DP_VID_STREAM_STATUS == 0
+ */
+
+ REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
+ 0,
+ 10, max_retries);
+
+ ASSERT(retries <= max_retries);
+
+ /* Tell the DP encoder to ignore timing from CRTC, must be done after
+ * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+ * complete, stream status will be stuck in video stream enabled state,
+ * i.e. DP_VID_STREAM_STATUS stuck at 1.
+ */
+
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
+}
+
+/* output video stream to link encoder */
+static void dce110_stream_encoder_dp_unblank(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
+ uint32_t n_vid = 0x8000;
+ uint32_t m_vid;
+
+ /* M / N = Fstream / Flink
+ * m_vid / n_vid = pixel rate / link rate
+ */
+
+ uint64_t m_vid_l = n_vid;
+
+ m_vid_l *= param->pixel_clk_khz;
+ m_vid_l = div_u64(m_vid_l,
+ param->link_settings.link_rate
+ * LINK_RATE_REF_FREQ_IN_KHZ);
+
+ m_vid = (uint32_t) m_vid_l;
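+		/* Example: a 148500 kHz pixel clock on an HBR2 link
+		 * (540000 kHz link symbol clock) gives m_vid / n_vid = 0.275,
+		 * i.e. m_vid = 0x2333 with n_vid = 0x8000.
+		 */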
+
+ /* enable auto measurement */
+
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0);
+
+		/* auto measurement needs one full 0x8000-symbol cycle to kick
+		 * in, therefore program initial values for Mvid and Nvid
+		 */
+
+ REG_UPDATE(DP_VID_N, DP_VID_N, n_vid);
+
+ REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
+
+ REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+ }
+
+ /* set DIG_START to 0x1 to resync FIFO */
+
+ REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
+
+ /* switch DP encoder to CRTC data */
+
+ REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0);
+
+ /* wait 100us for DIG/DP logic to prime
+ * (i.e. a few video lines)
+ */
+ udelay(100);
+
+	/* the hardware will start sending video at the start of the next DP
+	 * frame (i.e. rising edge of the vblank).
+	 * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this
+	 * register has no effect on the enable transition! HW always
+	 * guarantees VID_STREAM enable at the start of the next frame, and
+	 * this is not programmable.
+	 */
+
+ REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+}
+
+static void dce110_stream_encoder_set_avmute(
+ struct stream_encoder *enc,
+ bool enable)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ unsigned int value = enable ? 1 : 0;
+
+ REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value);
+}
+
+
+#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000
+#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1
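+/* 0x8000 (32768) is the nominal Naud value defined by the DP spec for
+ * asynchronous audio clock mode.
+ */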
+
+#include "include/audio_types.h"
+
+/**
+* speakers_to_channels
+*
+* @brief
+* translate speakers to channels
+*
+* FL - Front Left
+* FR - Front Right
+* RL - Rear Left
+* RR - Rear Right
+* RC - Rear Center
+* FC - Front Center
+* FLC - Front Left Center
+* FRC - Front Right Center
+* RLC - Rear Left Center
+* RRC - Rear Right Center
+* LFE - Low Freq Effect
+*
+* FC
+* FLC FRC
+* FL FR
+*
+* LFE
+* ()
+*
+*
+* RL RR
+* RLC RRC
+* RC
+*
+* ch 8 7 6 5 4 3 2 1
+* 0b00000011 - - - - - - FR FL
+* 0b00000111 - - - - - LFE FR FL
+* 0b00001011 - - - - FC - FR FL
+* 0b00001111 - - - - FC LFE FR FL
+* 0b00010011 - - - RC - - FR FL
+* 0b00010111 - - - RC - LFE FR FL
+* 0b00011011 - - - RC FC - FR FL
+* 0b00011111 - - - RC FC LFE FR FL
+* 0b00110011 - - RR RL - - FR FL
+* 0b00110111 - - RR RL - LFE FR FL
+* 0b00111011 - - RR RL FC - FR FL
+* 0b00111111 - - RR RL FC LFE FR FL
+* 0b01110011 - RC RR RL - - FR FL
+* 0b01110111 - RC RR RL - LFE FR FL
+* 0b01111011 - RC RR RL FC - FR FL
+* 0b01111111 - RC RR RL FC LFE FR FL
+* 0b11110011 RRC RLC RR RL - - FR FL
+* 0b11110111 RRC RLC RR RL - LFE FR FL
+* 0b11111011 RRC RLC RR RL FC - FR FL
+* 0b11111111 RRC RLC RR RL FC LFE FR FL
+* 0b11000011 FRC FLC - - - - FR FL
+* 0b11000111 FRC FLC - - - LFE FR FL
+* 0b11001011 FRC FLC - - FC - FR FL
+* 0b11001111 FRC FLC - - FC LFE FR FL
+* 0b11010011 FRC FLC - RC - - FR FL
+* 0b11010111 FRC FLC - RC - LFE FR FL
+* 0b11011011 FRC FLC - RC FC - FR FL
+* 0b11011111 FRC FLC - RC FC LFE FR FL
+* 0b11110011 FRC FLC RR RL - - FR FL
+* 0b11110111 FRC FLC RR RL - LFE FR FL
+* 0b11111011 FRC FLC RR RL FC - FR FL
+* 0b11111111 FRC FLC RR RL FC LFE FR FL
+*
+* @param
+* speakers - speaker information as it comes from CEA audio block
+*/
+
+union audio_cea_channels {
+ uint8_t all;
+ struct audio_cea_channels_bits {
+ uint32_t FL:1;
+ uint32_t FR:1;
+ uint32_t LFE:1;
+ uint32_t FC:1;
+ uint32_t RL_RC:1;
+ uint32_t RR:1;
+ uint32_t RC_RLC_FLC:1;
+ uint32_t RRC_FRC:1;
+ } channels;
+};
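+/* The packed channel-allocation byte (.all) produced by
+ * speakers_to_channels() below is written to AFMT_AUDIO_CHANNEL_ENABLE in
+ * dce110_se_audio_setup().
+ */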
+
+struct audio_clock_info {
+ /* pixel clock frequency*/
+ uint32_t pixel_clock_in_10khz;
+ /* N - 32KHz audio */
+ uint32_t n_32khz;
+ /* CTS - 32KHz audio*/
+ uint32_t cts_32khz;
+ uint32_t n_44khz;
+ uint32_t cts_44khz;
+ uint32_t n_48khz;
+ uint32_t cts_48khz;
+};
+
+static const struct audio_clock_info audio_clock_info_table[16] = {
+	{2517, 4576, 28125, 7007, 31250, 6864, 28125}, /* 25.2MHz/1.001 */
+	{2518, 4576, 28125, 7007, 31250, 6864, 28125}, /* 25.2MHz/1.001 */
+	{2520, 4096, 25200, 6272, 28000, 6144, 25200}, /* 25.2MHz */
+	{2700, 4096, 27000, 6272, 30000, 6144, 27000}, /* 27MHz */
+	{2702, 4096, 27027, 6272, 30030, 6144, 27027}, /* 27MHz*1.001 */
+	{2703, 4096, 27027, 6272, 30030, 6144, 27027}, /* 27MHz*1.001 */
+	{5400, 4096, 54000, 6272, 60000, 6144, 54000}, /* 54MHz */
+	{5405, 4096, 54054, 6272, 60060, 6144, 54054}, /* 54MHz*1.001 */
+	{7417, 11648, 210937, 17836, 234375, 11648, 140625}, /* 74.25MHz/1.001 */
+	{7425, 4096, 74250, 6272, 82500, 6144, 74250}, /* 74.25MHz */
+	{14835, 11648, 421875, 8918, 234375, 5824, 140625}, /* 148.5MHz/1.001 */
+	{14850, 4096, 148500, 6272, 165000, 6144, 148500}, /* 148.5MHz */
+	{29670, 5824, 421875, 4459, 234375, 5824, 281250},
+	{29700, 3072, 222750, 4704, 247500, 5120, 247500},
+	{59340, 5824, 843750, 8918, 937500, 5824, 562500},
+	{59400, 3072, 445500, 9408, 990000, 6144, 594000}
+};
+
+static const struct audio_clock_info audio_clock_info_table_36bpc[14] = {
+	{2517, 9152, 84375, 7007, 46875, 9152, 56250},
+	{2518, 9152, 84375, 7007, 46875, 9152, 56250},
+ {2520, 4096, 37800, 6272, 42000, 6144, 37800},
+ {2700, 4096, 40500, 6272, 45000, 6144, 40500},
+ {2702, 8192, 81081, 6272, 45045, 8192, 54054},
+ {2703, 8192, 81081, 6272, 45045, 8192, 54054},
+ {5400, 4096, 81000, 6272, 90000, 6144, 81000},
+ {5405, 4096, 81081, 6272, 90090, 6144, 81081},
+ {7417, 11648, 316406, 17836, 351562, 11648, 210937},
+ {7425, 4096, 111375, 6272, 123750, 6144, 111375},
+ {14835, 11648, 632812, 17836, 703125, 11648, 421875},
+ {14850, 4096, 222750, 6272, 247500, 6144, 222750},
+ {29670, 5824, 632812, 8918, 703125, 5824, 421875},
+ {29700, 4096, 445500, 4704, 371250, 5120, 371250}
+};
+
+static const struct audio_clock_info audio_clock_info_table_48bpc[14] = {
+ {2517, 4576, 56250, 7007, 62500, 6864, 56250},
+ {2518, 4576, 56250, 7007, 62500, 6864, 56250},
+ {2520, 4096, 50400, 6272, 56000, 6144, 50400},
+ {2700, 4096, 54000, 6272, 60000, 6144, 54000},
+	{2702, 4096, 54054, 6272, 60060, 8192, 54054},
+ {2703, 4096, 54054, 6272, 60060, 8192, 54054},
+ {5400, 4096, 108000, 6272, 120000, 6144, 108000},
+ {5405, 4096, 108108, 6272, 120120, 6144, 108108},
+ {7417, 11648, 421875, 17836, 468750, 11648, 281250},
+ {7425, 4096, 148500, 6272, 165000, 6144, 148500},
+ {14835, 11648, 843750, 8918, 468750, 11648, 281250},
+ {14850, 4096, 297000, 6272, 330000, 6144, 297000},
+ {29670, 5824, 843750, 4459, 468750, 5824, 562500},
+ {29700, 3072, 445500, 4704, 495000, 5120, 495000}
+};
+
+static union audio_cea_channels speakers_to_channels(
+ struct audio_speaker_flags speaker_flags)
+{
+ union audio_cea_channels cea_channels = {0};
+
+ /* these are one to one */
+ cea_channels.channels.FL = speaker_flags.FL_FR;
+ cea_channels.channels.FR = speaker_flags.FL_FR;
+ cea_channels.channels.LFE = speaker_flags.LFE;
+ cea_channels.channels.FC = speaker_flags.FC;
+
+ /* if Rear Left and Right exist move RC speaker to channel 7
+ * otherwise to channel 5
+ */
+ if (speaker_flags.RL_RR) {
+ cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+ cea_channels.channels.RR = speaker_flags.RL_RR;
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+ } else {
+ cea_channels.channels.RL_RC = speaker_flags.RC;
+ }
+
+ /* FRONT Left Right Center and REAR Left Right Center are exclusive */
+ if (speaker_flags.FLC_FRC) {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+ } else {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+ }
+
+ return cea_channels;
+}
+
+static uint32_t calc_max_audio_packets_per_line(
+ const struct audio_crtc_info *crtc_info)
+{
+ uint32_t max_packets_per_line;
+
+ max_packets_per_line =
+ crtc_info->h_total - crtc_info->h_active;
+
+ if (crtc_info->pixel_repetition)
+ max_packets_per_line *= crtc_info->pixel_repetition;
+
+ /* for other hdmi features */
+ max_packets_per_line -= 58;
+ /* for Control Period */
+ max_packets_per_line -= 16;
+ /* Number of Audio Packets per Line */
+ max_packets_per_line /= 32;
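+	/* (each HDMI data island packet occupies 32 pixel clocks) */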
+
+ return max_packets_per_line;
+}
+
+static void get_audio_clock_info(
+ enum dc_color_depth color_depth,
+ uint32_t crtc_pixel_clock_in_khz,
+ uint32_t actual_pixel_clock_in_khz,
+ struct audio_clock_info *audio_clock_info)
+{
+ const struct audio_clock_info *clock_info;
+ uint32_t index;
+ uint32_t crtc_pixel_clock_in_10khz = crtc_pixel_clock_in_khz / 10;
+ uint32_t audio_array_size;
+
+ switch (color_depth) {
+ case COLOR_DEPTH_161616:
+ clock_info = audio_clock_info_table_48bpc;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table_48bpc);
+ break;
+ case COLOR_DEPTH_121212:
+ clock_info = audio_clock_info_table_36bpc;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table_36bpc);
+ break;
+ default:
+ clock_info = audio_clock_info_table;
+ audio_array_size = ARRAY_SIZE(
+ audio_clock_info_table);
+ break;
+ }
+
+ if (clock_info != NULL) {
+ /* search for exact pixel clock in table */
+ for (index = 0; index < audio_array_size; index++) {
+ if (clock_info[index].pixel_clock_in_10khz >
+ crtc_pixel_clock_in_10khz)
+				break;  /* no exact match */
+ else if (clock_info[index].pixel_clock_in_10khz ==
+ crtc_pixel_clock_in_10khz) {
+ /* match found */
+ *audio_clock_info = clock_info[index];
+ return;
+ }
+ }
+ }
+
+ /* not found */
+ if (actual_pixel_clock_in_khz == 0)
+ actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz;
+
+	/* See the HDMI spec: use the recommended values from the table
+	 * entry for the "Other" pixel clock. */
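+	/* HDMI ACR relation: 128 * fs = f_TMDS * N / CTS. With the
+	 * recommended N of 4096 (32 kHz) and 6144 (48 kHz), CTS works out
+	 * to the TMDS clock in kHz, which is why CTS defaults to the pixel
+	 * clock below.
+	 */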
+ audio_clock_info->pixel_clock_in_10khz =
+ actual_pixel_clock_in_khz / 10;
+ audio_clock_info->cts_32khz = actual_pixel_clock_in_khz;
+ audio_clock_info->cts_44khz = actual_pixel_clock_in_khz;
+ audio_clock_info->cts_48khz = actual_pixel_clock_in_khz;
+
+ audio_clock_info->n_32khz = 4096;
+ audio_clock_info->n_44khz = 6272;
+ audio_clock_info->n_48khz = 6144;
+}
+
+static void dce110_se_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *audio_info)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ uint32_t speakers = 0;
+ uint32_t channels = 0;
+
+ ASSERT(audio_info);
+	if (audio_info == NULL)
+		/* This should not happen; bail out so we do not crash. */
+		return;
+
+ speakers = audio_info->flags.info.ALLSPEAKERS;
+ channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+ /* setup the audio stream source select (audio -> dig mapping) */
+ REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst);
+
+ /* Channel allocation */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
+}
+
+static void dce110_se_setup_hdmi_audio(
+ struct stream_encoder *enc,
+ const struct audio_crtc_info *crtc_info)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ struct audio_clock_info audio_clock_info = {0};
+ uint32_t max_packets_per_line;
+
+	/* For now still do the calculation, although this field is ignored
+	 * when HDMI_PACKET_GEN_VERSION is set to 1 above */
+ max_packets_per_line = calc_max_audio_packets_per_line(crtc_info);
+
+ /* HDMI_AUDIO_PACKET_CONTROL */
+ REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL,
+ HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line,
+ HDMI_AUDIO_DELAY_EN, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+ /* AFMT_AUDIO_PACKET_CONTROL2 */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* HDMI_ACR_PACKET_CONTROL */
+ REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL,
+ HDMI_ACR_AUTO_SEND, 1,
+ HDMI_ACR_SOURCE, 0,
+ HDMI_ACR_AUDIO_PRIORITY, 0);
+
+ /* Program audio clock sample/regeneration parameters */
+ get_audio_clock_info(crtc_info->color_depth,
+ crtc_info->requested_pixel_clock,
+ crtc_info->calculated_pixel_clock,
+ &audio_clock_info);
+	dm_logger_write(enc->ctx->logger, LOG_HW_AUDIO,
+			"\n%s: Input::requested_pixel_clock = %d, "
+			"calculated_pixel_clock = %d\n", __func__,
+			crtc_info->requested_pixel_clock,
+			crtc_info->calculated_pixel_clock);
+
+ /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz);
+
+ /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */
+ REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz);
+
+ /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz);
+
+ /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */
+ REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz);
+
+ /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz);
+
+ /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */
+ REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz);
+
+	/* The video driver cannot know in advance which sample rate will
+	 * be used by the HD audio driver. The
+	 * HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is
+	 * programmed later, in the audio interrupt callback. */
+
+ /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK &
+ AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+ REG_UPDATE_2(AFMT_60958_0,
+ AFMT_60958_CS_CHANNEL_NUMBER_L, 1,
+ AFMT_60958_CS_CLOCK_ACCURACY, 0);
+
+	/* AFMT_60958_1 AFMT_60958_CS_CHANNEL_NUMBER_R */
+ REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+
+	/* AFMT_60958_2: keep these settings until the
+	 * programming guide comes out */
+ REG_UPDATE_6(AFMT_60958_2,
+ AFMT_60958_CS_CHANNEL_NUMBER_2, 3,
+ AFMT_60958_CS_CHANNEL_NUMBER_3, 4,
+ AFMT_60958_CS_CHANNEL_NUMBER_4, 5,
+ AFMT_60958_CS_CHANNEL_NUMBER_5, 6,
+ AFMT_60958_CS_CHANNEL_NUMBER_6, 7,
+ AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+}
+
+static void dce110_se_setup_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ /* --- DP Audio packet configurations --- */
+
+ /* ATP Configuration */
+ REG_SET(DP_SEC_AUD_N, 0,
+ DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT);
+
+ /* Async/auto-calc timestamp mode */
+ REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE,
+ DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC);
+
+ /* --- The following are the registers
+ * copied from the SetupHDMI --- */
+
+ /* AFMT_AUDIO_PACKET_CONTROL */
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+
+	/* AFMT_AUDIO_PACKET_CONTROL2 */
+ REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2,
+ AFMT_AUDIO_LAYOUT_OVRD, 0,
+ AFMT_60958_OSF_OVRD, 0);
+
+ /* AFMT_INFOFRAME_CONTROL0 */
+ REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+
+ /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */
+ REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0);
+}
+
+static void dce110_se_enable_audio_clock(
+ struct stream_encoder *enc,
+ bool enable)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ if (REG(AFMT_CNTL) == 0)
+ return; /* DCE8/10 does not have this register */
+
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable);
+
+	/* wait for the AFMT clock to turn on;
+	 * expectation: this should complete in 1-2 reads
+	 *
+	 * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10);
+	 *
+	 * TODO: waiting for clock_on does not work well. It may need a HW
+	 * programming sequence, but audio seems to work normally even
+	 * without waiting for the clock_on status change.
+	 */
+}
+
+static void dce110_se_enable_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ /* Enable Audio packets */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
+
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(DP_SEC_CNTL,
+ DP_SEC_ATP_ENABLE, 1,
+ DP_SEC_AIP_ENABLE, 1);
+
+ /* Program STREAM_ENABLE after all the other enables. */
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+}
+
+static void dce110_se_disable_dp_audio(
+ struct stream_encoder *enc)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ uint32_t value = REG_READ(DP_SEC_CNTL);
+
+ /* Disable Audio packets */
+ REG_UPDATE_5(DP_SEC_CNTL,
+ DP_SEC_ASP_ENABLE, 0,
+ DP_SEC_ATP_ENABLE, 0,
+ DP_SEC_AIP_ENABLE, 0,
+ DP_SEC_ACM_ENABLE, 0,
+ DP_SEC_STREAM_ENABLE, 0);
+
+	/* This register is shared with the encoder info frame. Therefore we
+	 * need to keep the master enable bit set if at least one of the
+	 * fields is not 0. */
+ if (value != 0)
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+
+}
+
+void dce110_se_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
+}
+
+void dce110_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info)
+{
+ dce110_se_audio_setup(enc, az_inst, info);
+}
+
+void dce110_se_dp_audio_enable(
+ struct stream_encoder *enc)
+{
+ dce110_se_enable_audio_clock(enc, true);
+ dce110_se_setup_dp_audio(enc);
+ dce110_se_enable_dp_audio(enc);
+}
+
+void dce110_se_dp_audio_disable(
+ struct stream_encoder *enc)
+{
+ dce110_se_disable_dp_audio(enc);
+ dce110_se_enable_audio_clock(enc, false);
+}
+
+void dce110_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info)
+{
+ dce110_se_enable_audio_clock(enc, true);
+ dce110_se_setup_hdmi_audio(enc, audio_crtc_info);
+ dce110_se_audio_setup(enc, az_inst, info);
+}
+
+void dce110_se_hdmi_audio_disable(
+ struct stream_encoder *enc)
+{
+ dce110_se_enable_audio_clock(enc, false);
+}
+
+
+static void setup_stereo_sync(
+ struct stream_encoder *enc,
+ int tg_inst, bool enable)
+{
+ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
+ REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, tg_inst);
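+	/* gate the stereosync output when stereo sync is not enabled */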
+ REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable);
+}
+
+
+static const struct stream_encoder_funcs dce110_str_enc_funcs = {
+ .dp_set_stream_attribute =
+ dce110_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ dce110_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ dce110_stream_encoder_dvi_set_stream_attribute,
+ .set_mst_bandwidth =
+ dce110_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+ dce110_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ dce110_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets =
+ dce110_stream_encoder_update_dp_info_packets,
+ .stop_dp_info_packets =
+ dce110_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ dce110_stream_encoder_dp_blank,
+ .dp_unblank =
+ dce110_stream_encoder_dp_unblank,
+ .audio_mute_control = dce110_se_audio_mute_control,
+
+ .dp_audio_setup = dce110_se_dp_audio_setup,
+ .dp_audio_enable = dce110_se_dp_audio_enable,
+ .dp_audio_disable = dce110_se_dp_audio_disable,
+
+ .hdmi_audio_setup = dce110_se_hdmi_audio_setup,
+ .hdmi_audio_disable = dce110_se_hdmi_audio_disable,
+ .setup_stereo_sync = setup_stereo_sync,
+ .set_avmute = dce110_stream_encoder_set_avmute,
+
+};
+
+void dce110_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dce110_stream_enc_registers *regs,
+ const struct dce_stream_encoder_shift *se_shift,
+ const struct dce_stream_encoder_mask *se_mask)
+{
+ enc110->base.funcs = &dce110_str_enc_funcs;
+ enc110->base.ctx = ctx;
+ enc110->base.id = eng_id;
+ enc110->base.bp = bp;
+ enc110->regs = regs;
+ enc110->se_shift = se_shift;
+ enc110->se_mask = se_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
new file mode 100644
index 000000000000..6c28229c76eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -0,0 +1,733 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_STREAM_ENCODER_DCE110_H__
+#define __DC_STREAM_ENCODER_DCE110_H__
+
+#include "stream_encoder.h"
+
+#define DCE110STRENC_FROM_STRENC(stream_encoder)\
+ container_of(stream_encoder, struct dce110_stream_encoder, base)
+
+#ifndef TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING_MASK 0x00000010L
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT_MASK 0x00000300L
+ #define TMDS_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x00000004
+ #define TMDS_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x00000008
+#endif
+
+
+#define SE_COMMON_REG_LIST_DCE_BASE(id) \
+ SE_COMMON_REG_LIST_BASE(id),\
+ SRI(AFMT_AVI_INFO0, DIG, id), \
+ SRI(AFMT_AVI_INFO1, DIG, id), \
+ SRI(AFMT_AVI_INFO2, DIG, id), \
+ SRI(AFMT_AVI_INFO3, DIG, id)
+
+#define SE_COMMON_REG_LIST_BASE(id) \
+ SRI(AFMT_GENERIC_0, DIG, id), \
+ SRI(AFMT_GENERIC_1, DIG, id), \
+ SRI(AFMT_GENERIC_2, DIG, id), \
+ SRI(AFMT_GENERIC_3, DIG, id), \
+ SRI(AFMT_GENERIC_4, DIG, id), \
+ SRI(AFMT_GENERIC_5, DIG, id), \
+ SRI(AFMT_GENERIC_6, DIG, id), \
+ SRI(AFMT_GENERIC_7, DIG, id), \
+ SRI(AFMT_GENERIC_HDR, DIG, id), \
+ SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \
+ SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \
+ SRI(AFMT_60958_0, DIG, id), \
+ SRI(AFMT_60958_1, DIG, id), \
+ SRI(AFMT_60958_2, DIG, id), \
+ SRI(DIG_FE_CNTL, DIG, id), \
+ SRI(HDMI_CONTROL, DIG, id), \
+ SRI(HDMI_GC, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \
+ SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \
+ SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \
+ SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\
+ SRI(HDMI_ACR_32_0, DIG, id),\
+ SRI(HDMI_ACR_32_1, DIG, id),\
+ SRI(HDMI_ACR_44_0, DIG, id),\
+ SRI(HDMI_ACR_44_1, DIG, id),\
+ SRI(HDMI_ACR_48_0, DIG, id),\
+ SRI(HDMI_ACR_48_1, DIG, id),\
+ SRI(TMDS_CNTL, DIG, id), \
+ SRI(DP_MSE_RATE_CNTL, DP, id), \
+ SRI(DP_MSE_RATE_UPDATE, DP, id), \
+ SRI(DP_PIXEL_FORMAT, DP, id), \
+ SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_STEER_FIFO, DP, id), \
+ SRI(DP_VID_M, DP, id), \
+ SRI(DP_VID_N, DP, id), \
+ SRI(DP_VID_STREAM_CNTL, DP, id), \
+ SRI(DP_VID_TIMING, DP, id), \
+ SRI(DP_SEC_AUD_N, DP, id), \
+ SRI(DP_SEC_TIMESTAMP, DP, id)
+
+#define SE_COMMON_REG_LIST(id)\
+ SE_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(AFMT_CNTL, DIG, id)
+
+#define SE_DCN_REG_LIST(id)\
+ SE_COMMON_REG_LIST_BASE(id),\
+ SRI(AFMT_CNTL, DIG, id),\
+ SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id),\
+ SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \
+ SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \
+ SRI(DP_DB_CNTL, DP, id), \
+ SRI(DP_MSA_MISC, DP, id), \
+ SRI(DP_MSA_COLORIMETRY, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM1, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM2, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM3, DP, id), \
+ SRI(DP_MSA_TIMING_PARAM4, DP, id), \
+ SRI(HDMI_DB_CONTROL, DIG, id)
+
+#define SE_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
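+/* Expands to .field_name = <reg_name>__<field_name><post_fix>, so the same
+ * field list can be instantiated once with post_fix = __SHIFT (for the shift
+ * struct) and once with post_fix = _MASK (for the mask struct).
+ */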
+
+#define SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
+ SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, mask_sh),\
+ SE_SF(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC2_UPDATE, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
+ SE_SF(AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_DYN_RANGE, mask_sh),\
+ SE_SF(DP_PIXEL_FORMAT, DP_YCBCR_RANGE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+ SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+ SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+ SE_SF(HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+ SE_SF(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+ SE_SF(HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+ SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+ SE_SF(DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+ SE_SF(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, mask_sh),\
+ SE_SF(HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_AVI_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+ SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+ SE_SF(DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+ SE_SF(DP_VID_N, DP_VID_N, mask_sh),\
+ SE_SF(DP_VID_M, DP_VID_M, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_START, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\
+ SE_SF(AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
+ SE_SF(HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+ SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+ SE_SF(HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+ SE_SF(HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+ SE_SF(HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+ SE_SF(HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+ SE_SF(HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+ SE_SF(HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+ SE_SF(HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+ SE_SF(AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+ SE_SF(DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+ SE_SF(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\
+ SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, mask_sh),\
+ SE_SF(DIG0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\
+ SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\
+ SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\
+ SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_START, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\
+ SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\
+ SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\
+ SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\
+ SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+ SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE80_100(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+ SE_SF(TMDS_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(TMDS_CNTL, TMDS_COLOR_FORMAT, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+ SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\
+ SE_SF(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE112(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_DCE_COMMON(mask_sh),\
+ SE_SF(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\
+ SE_SF(HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\
+ SE_SF(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCE120(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC0_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC2_UPDATE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_DYN_RANGE, mask_sh),\
+ SE_SF(DP0_DP_PIXEL_FORMAT, DP_YCBCR_RANGE, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, mask_sh),\
+ SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AVI_ENABLE, mask_sh),\
+ SE_SF(DIG0_AFMT_AVI_INFO3, AFMT_AVI_INFO_VERSION, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, mask_sh)
+
+#define SE_COMMON_MASK_SH_LIST_DCN10(mask_sh)\
+ SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE_PENDING, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
+ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\
+ SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\
+ SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\
+ SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\
+ SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh)
+
+struct dce_stream_encoder_shift {
+ uint8_t AFMT_GENERIC_INDEX;
+ uint8_t AFMT_GENERIC0_UPDATE;
+ uint8_t AFMT_GENERIC2_UPDATE;
+ uint8_t AFMT_GENERIC_HB0;
+ uint8_t AFMT_GENERIC_HB1;
+ uint8_t AFMT_GENERIC_HB2;
+ uint8_t AFMT_GENERIC_HB3;
+ uint8_t AFMT_GENERIC_LOCK_STATUS;
+ uint8_t AFMT_GENERIC_CONFLICT;
+ uint8_t AFMT_GENERIC_CONFLICT_CLR;
+ uint8_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+ uint8_t AFMT_GENERIC0_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC1_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC2_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC3_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC4_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC5_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC6_FRAME_UPDATE;
+ uint8_t AFMT_GENERIC7_FRAME_UPDATE;
+ uint8_t HDMI_GENERIC0_CONT;
+ uint8_t HDMI_GENERIC0_SEND;
+ uint8_t HDMI_GENERIC0_LINE;
+ uint8_t HDMI_GENERIC1_CONT;
+ uint8_t HDMI_GENERIC1_SEND;
+ uint8_t HDMI_GENERIC1_LINE;
+ uint8_t DP_PIXEL_ENCODING;
+ uint8_t DP_COMPONENT_DEPTH;
+ uint8_t DP_DYN_RANGE;
+ uint8_t DP_YCBCR_RANGE;
+ uint8_t HDMI_PACKET_GEN_VERSION;
+ uint8_t HDMI_KEEPOUT_MODE;
+ uint8_t HDMI_DEEP_COLOR_ENABLE;
+ uint8_t HDMI_CLOCK_CHANNEL_RATE;
+ uint8_t HDMI_DEEP_COLOR_DEPTH;
+ uint8_t HDMI_GC_CONT;
+ uint8_t HDMI_GC_SEND;
+ uint8_t HDMI_NULL_SEND;
+ uint8_t HDMI_DATA_SCRAMBLE_EN;
+ uint8_t HDMI_AUDIO_INFO_SEND;
+ uint8_t AFMT_AUDIO_INFO_UPDATE;
+ uint8_t HDMI_AUDIO_INFO_LINE;
+ uint8_t HDMI_GC_AVMUTE;
+ uint8_t DP_MSE_RATE_X;
+ uint8_t DP_MSE_RATE_Y;
+ uint8_t DP_MSE_RATE_UPDATE_PENDING;
+ uint8_t AFMT_AVI_INFO_VERSION;
+ uint8_t HDMI_AVI_INFO_SEND;
+ uint8_t HDMI_AVI_INFO_CONT;
+ uint8_t HDMI_AVI_INFO_LINE;
+ uint8_t DP_SEC_GSP0_ENABLE;
+ uint8_t DP_SEC_STREAM_ENABLE;
+ uint8_t DP_SEC_GSP1_ENABLE;
+ uint8_t DP_SEC_GSP2_ENABLE;
+ uint8_t DP_SEC_GSP3_ENABLE;
+ uint8_t DP_SEC_GSP4_ENABLE;
+ uint8_t DP_SEC_GSP5_ENABLE;
+ uint8_t DP_SEC_GSP6_ENABLE;
+ uint8_t DP_SEC_GSP7_ENABLE;
+ uint8_t DP_SEC_AVI_ENABLE;
+ uint8_t DP_SEC_MPG_ENABLE;
+ uint8_t DP_VID_STREAM_DIS_DEFER;
+ uint8_t DP_VID_STREAM_ENABLE;
+ uint8_t DP_VID_STREAM_STATUS;
+ uint8_t DP_STEER_FIFO_RESET;
+ uint8_t DP_VID_M_N_GEN_EN;
+ uint8_t DP_VID_N;
+ uint8_t DP_VID_M;
+ uint8_t DIG_START;
+ uint8_t AFMT_AUDIO_SRC_SELECT;
+ uint8_t AFMT_AUDIO_CHANNEL_ENABLE;
+ uint8_t HDMI_AUDIO_PACKETS_PER_LINE;
+ uint8_t HDMI_AUDIO_DELAY_EN;
+ uint8_t AFMT_60958_CS_UPDATE;
+ uint8_t AFMT_AUDIO_LAYOUT_OVRD;
+ uint8_t AFMT_60958_OSF_OVRD;
+ uint8_t HDMI_ACR_AUTO_SEND;
+ uint8_t HDMI_ACR_SOURCE;
+ uint8_t HDMI_ACR_AUDIO_PRIORITY;
+ uint8_t HDMI_ACR_CTS_32;
+ uint8_t HDMI_ACR_N_32;
+ uint8_t HDMI_ACR_CTS_44;
+ uint8_t HDMI_ACR_N_44;
+ uint8_t HDMI_ACR_CTS_48;
+ uint8_t HDMI_ACR_N_48;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+ uint8_t AFMT_60958_CS_CLOCK_ACCURACY;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+ uint8_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+ uint8_t DP_SEC_AUD_N;
+ uint8_t DP_SEC_TIMESTAMP_MODE;
+ uint8_t DP_SEC_ASP_ENABLE;
+ uint8_t DP_SEC_ATP_ENABLE;
+ uint8_t DP_SEC_AIP_ENABLE;
+ uint8_t DP_SEC_ACM_ENABLE;
+ uint8_t AFMT_AUDIO_SAMPLE_SEND;
+ uint8_t AFMT_AUDIO_CLOCK_EN;
+ uint8_t TMDS_PIXEL_ENCODING;
+ uint8_t TMDS_COLOR_FORMAT;
+ uint8_t DIG_STEREOSYNC_SELECT;
+ uint8_t DIG_STEREOSYNC_GATE_EN;
+ uint8_t DP_DB_DISABLE;
+ uint8_t DP_MSA_MISC0;
+ uint8_t DP_MSA_HTOTAL;
+ uint8_t DP_MSA_VTOTAL;
+ uint8_t DP_MSA_HSTART;
+ uint8_t DP_MSA_VSTART;
+ uint8_t DP_MSA_HSYNCWIDTH;
+ uint8_t DP_MSA_HSYNCPOLARITY;
+ uint8_t DP_MSA_VSYNCWIDTH;
+ uint8_t DP_MSA_VSYNCPOLARITY;
+ uint8_t DP_MSA_HWIDTH;
+ uint8_t DP_MSA_VHEIGHT;
+ uint8_t HDMI_DB_DISABLE;
+ uint8_t DP_VID_N_MUL;
+ uint8_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dce_stream_encoder_mask {
+ uint32_t AFMT_GENERIC_INDEX;
+ uint32_t AFMT_GENERIC0_UPDATE;
+ uint32_t AFMT_GENERIC2_UPDATE;
+ uint32_t AFMT_GENERIC_HB0;
+ uint32_t AFMT_GENERIC_HB1;
+ uint32_t AFMT_GENERIC_HB2;
+ uint32_t AFMT_GENERIC_HB3;
+ uint32_t AFMT_GENERIC_LOCK_STATUS;
+ uint32_t AFMT_GENERIC_CONFLICT;
+ uint32_t AFMT_GENERIC_CONFLICT_CLR;
+ uint32_t AFMT_GENERIC0_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC1_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC2_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC3_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC4_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC5_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC6_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC7_FRAME_UPDATE_PENDING;
+ uint32_t AFMT_GENERIC0_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC1_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC2_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC3_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC4_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC5_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC6_FRAME_UPDATE;
+ uint32_t AFMT_GENERIC7_FRAME_UPDATE;
+ uint32_t HDMI_GENERIC0_CONT;
+ uint32_t HDMI_GENERIC0_SEND;
+ uint32_t HDMI_GENERIC0_LINE;
+ uint32_t HDMI_GENERIC1_CONT;
+ uint32_t HDMI_GENERIC1_SEND;
+ uint32_t HDMI_GENERIC1_LINE;
+ uint32_t DP_PIXEL_ENCODING;
+ uint32_t DP_COMPONENT_DEPTH;
+ uint32_t DP_DYN_RANGE;
+ uint32_t DP_YCBCR_RANGE;
+ uint32_t HDMI_PACKET_GEN_VERSION;
+ uint32_t HDMI_KEEPOUT_MODE;
+ uint32_t HDMI_DEEP_COLOR_ENABLE;
+ uint32_t HDMI_CLOCK_CHANNEL_RATE;
+ uint32_t HDMI_DEEP_COLOR_DEPTH;
+ uint32_t HDMI_GC_CONT;
+ uint32_t HDMI_GC_SEND;
+ uint32_t HDMI_NULL_SEND;
+ uint32_t HDMI_DATA_SCRAMBLE_EN;
+ uint32_t HDMI_AUDIO_INFO_SEND;
+ uint32_t AFMT_AUDIO_INFO_UPDATE;
+ uint32_t HDMI_AUDIO_INFO_LINE;
+ uint32_t HDMI_GC_AVMUTE;
+ uint32_t DP_MSE_RATE_X;
+ uint32_t DP_MSE_RATE_Y;
+ uint32_t DP_MSE_RATE_UPDATE_PENDING;
+ uint32_t AFMT_AVI_INFO_VERSION;
+ uint32_t HDMI_AVI_INFO_SEND;
+ uint32_t HDMI_AVI_INFO_CONT;
+ uint32_t HDMI_AVI_INFO_LINE;
+ uint32_t DP_SEC_GSP0_ENABLE;
+ uint32_t DP_SEC_STREAM_ENABLE;
+ uint32_t DP_SEC_GSP1_ENABLE;
+ uint32_t DP_SEC_GSP2_ENABLE;
+ uint32_t DP_SEC_GSP3_ENABLE;
+ uint32_t DP_SEC_GSP4_ENABLE;
+ uint32_t DP_SEC_GSP5_ENABLE;
+ uint32_t DP_SEC_GSP6_ENABLE;
+ uint32_t DP_SEC_GSP7_ENABLE;
+ uint32_t DP_SEC_AVI_ENABLE;
+ uint32_t DP_SEC_MPG_ENABLE;
+ uint32_t DP_VID_STREAM_DIS_DEFER;
+ uint32_t DP_VID_STREAM_ENABLE;
+ uint32_t DP_VID_STREAM_STATUS;
+ uint32_t DP_STEER_FIFO_RESET;
+ uint32_t DP_VID_M_N_GEN_EN;
+ uint32_t DP_VID_N;
+ uint32_t DP_VID_M;
+ uint32_t DIG_START;
+ uint32_t AFMT_AUDIO_SRC_SELECT;
+ uint32_t AFMT_AUDIO_CHANNEL_ENABLE;
+ uint32_t HDMI_AUDIO_PACKETS_PER_LINE;
+ uint32_t HDMI_AUDIO_DELAY_EN;
+ uint32_t AFMT_60958_CS_UPDATE;
+ uint32_t AFMT_AUDIO_LAYOUT_OVRD;
+ uint32_t AFMT_60958_OSF_OVRD;
+ uint32_t HDMI_ACR_AUTO_SEND;
+ uint32_t HDMI_ACR_SOURCE;
+ uint32_t HDMI_ACR_AUDIO_PRIORITY;
+ uint32_t HDMI_ACR_CTS_32;
+ uint32_t HDMI_ACR_N_32;
+ uint32_t HDMI_ACR_CTS_44;
+ uint32_t HDMI_ACR_N_44;
+ uint32_t HDMI_ACR_CTS_48;
+ uint32_t HDMI_ACR_N_48;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_L;
+ uint32_t AFMT_60958_CS_CLOCK_ACCURACY;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_R;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_2;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_3;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_4;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_5;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_6;
+ uint32_t AFMT_60958_CS_CHANNEL_NUMBER_7;
+ uint32_t DP_SEC_AUD_N;
+ uint32_t DP_SEC_TIMESTAMP_MODE;
+ uint32_t DP_SEC_ASP_ENABLE;
+ uint32_t DP_SEC_ATP_ENABLE;
+ uint32_t DP_SEC_AIP_ENABLE;
+ uint32_t DP_SEC_ACM_ENABLE;
+ uint32_t AFMT_AUDIO_SAMPLE_SEND;
+ uint32_t AFMT_AUDIO_CLOCK_EN;
+ uint32_t TMDS_PIXEL_ENCODING;
+ uint32_t DIG_STEREOSYNC_SELECT;
+ uint32_t DIG_STEREOSYNC_GATE_EN;
+ uint32_t TMDS_COLOR_FORMAT;
+ uint32_t DP_DB_DISABLE;
+ uint32_t DP_MSA_MISC0;
+ uint32_t DP_MSA_HTOTAL;
+ uint32_t DP_MSA_VTOTAL;
+ uint32_t DP_MSA_HSTART;
+ uint32_t DP_MSA_VSTART;
+ uint32_t DP_MSA_HSYNCWIDTH;
+ uint32_t DP_MSA_HSYNCPOLARITY;
+ uint32_t DP_MSA_VSYNCWIDTH;
+ uint32_t DP_MSA_VSYNCPOLARITY;
+ uint32_t DP_MSA_HWIDTH;
+ uint32_t DP_MSA_VHEIGHT;
+ uint32_t HDMI_DB_DISABLE;
+ uint32_t DP_VID_N_MUL;
+ uint32_t DP_VID_M_DOUBLE_VALUE_EN;
+};
+
+struct dce110_stream_enc_registers {
+ uint32_t AFMT_CNTL;
+ uint32_t AFMT_AVI_INFO0;
+ uint32_t AFMT_AVI_INFO1;
+ uint32_t AFMT_AVI_INFO2;
+ uint32_t AFMT_AVI_INFO3;
+ uint32_t AFMT_GENERIC_0;
+ uint32_t AFMT_GENERIC_1;
+ uint32_t AFMT_GENERIC_2;
+ uint32_t AFMT_GENERIC_3;
+ uint32_t AFMT_GENERIC_4;
+ uint32_t AFMT_GENERIC_5;
+ uint32_t AFMT_GENERIC_6;
+ uint32_t AFMT_GENERIC_7;
+ uint32_t AFMT_GENERIC_HDR;
+ uint32_t AFMT_INFOFRAME_CONTROL0;
+ uint32_t AFMT_VBI_PACKET_CONTROL;
+ uint32_t AFMT_VBI_PACKET_CONTROL1;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+ uint32_t AFMT_AUDIO_SRC_CONTROL;
+ uint32_t AFMT_60958_0;
+ uint32_t AFMT_60958_1;
+ uint32_t AFMT_60958_2;
+ uint32_t DIG_FE_CNTL;
+ uint32_t DP_MSE_RATE_CNTL;
+ uint32_t DP_MSE_RATE_UPDATE;
+ uint32_t DP_PIXEL_FORMAT;
+ uint32_t DP_SEC_CNTL;
+ uint32_t DP_STEER_FIFO;
+ uint32_t DP_VID_M;
+ uint32_t DP_VID_N;
+ uint32_t DP_VID_STREAM_CNTL;
+ uint32_t DP_VID_TIMING;
+ uint32_t DP_SEC_AUD_N;
+ uint32_t DP_SEC_TIMESTAMP;
+ uint32_t HDMI_CONTROL;
+ uint32_t HDMI_GC;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL0;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL1;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL2;
+ uint32_t HDMI_GENERIC_PACKET_CONTROL3;
+ uint32_t HDMI_INFOFRAME_CONTROL0;
+ uint32_t HDMI_INFOFRAME_CONTROL1;
+ uint32_t HDMI_VBI_PACKET_CONTROL;
+ uint32_t HDMI_AUDIO_PACKET_CONTROL;
+ uint32_t HDMI_ACR_PACKET_CONTROL;
+ uint32_t HDMI_ACR_32_0;
+ uint32_t HDMI_ACR_32_1;
+ uint32_t HDMI_ACR_44_0;
+ uint32_t HDMI_ACR_44_1;
+ uint32_t HDMI_ACR_48_0;
+ uint32_t HDMI_ACR_48_1;
+ uint32_t TMDS_CNTL;
+ uint32_t DP_DB_CNTL;
+ uint32_t DP_MSA_MISC;
+ uint32_t DP_MSA_COLORIMETRY;
+ uint32_t DP_MSA_TIMING_PARAM1;
+ uint32_t DP_MSA_TIMING_PARAM2;
+ uint32_t DP_MSA_TIMING_PARAM3;
+ uint32_t DP_MSA_TIMING_PARAM4;
+ uint32_t HDMI_DB_CONTROL;
+};
+
+struct dce110_stream_encoder {
+ struct stream_encoder base;
+ const struct dce110_stream_enc_registers *regs;
+ const struct dce_stream_encoder_shift *se_shift;
+ const struct dce_stream_encoder_mask *se_mask;
+};
+
+void dce110_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id,
+ const struct dce110_stream_enc_registers *regs,
+ const struct dce_stream_encoder_shift *se_shift,
+ const struct dce_stream_encoder_mask *se_mask);
+
+
+void dce110_se_audio_mute_control(
+ struct stream_encoder *enc, bool mute);
+
+void dce110_se_dp_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info);
+
+void dce110_se_dp_audio_enable(
+ struct stream_encoder *enc);
+
+void dce110_se_dp_audio_disable(
+ struct stream_encoder *enc);
+
+void dce110_se_hdmi_audio_setup(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info);
+
+void dce110_se_hdmi_audio_disable(
+ struct stream_encoder *enc);
+
+#endif /* __DC_STREAM_ENCODER_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
new file mode 100644
index 000000000000..ae32af31eff1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -0,0 +1,1463 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_transform.h"
+#include "reg_helper.h"
+#include "opp.h"
+#include "basics/conversion.h"
+#include "dc.h"
+
+#define REG(reg) \
+ (xfm_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ xfm_dce->xfm_shift->field_name, xfm_dce->xfm_mask->field_name
+
+#define CTX \
+ xfm_dce->base.ctx
+
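+/*
+ * A ratio counts as identity (1:1) when its u2.19 fixed-point
+ * representation equals 1.0, i.e. 1 << 19.
+ */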
+#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
+#define GAMUT_MATRIX_SIZE 12
+#define SCL_PHASES 16
+
+enum dcp_out_trunc_round_mode {
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_MODE_ROUND
+};
+
+enum dcp_out_trunc_round_depth {
+ DCP_OUT_TRUNC_ROUND_DEPTH_14BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_13BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_12BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_11BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_10BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_9BIT,
+ DCP_OUT_TRUNC_ROUND_DEPTH_8BIT
+};
+
+/* defines the various methods of bit reduction available for use */
+enum dcp_bit_depth_reduction_mode {
+ DCP_BIT_DEPTH_REDUCTION_MODE_DITHER,
+ DCP_BIT_DEPTH_REDUCTION_MODE_ROUND,
+ DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE,
+ DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED,
+ DCP_BIT_DEPTH_REDUCTION_MODE_INVALID
+};
+
+enum dcp_spatial_dither_mode {
+ DCP_SPATIAL_DITHER_MODE_AAAA,
+ DCP_SPATIAL_DITHER_MODE_A_AA_A,
+ DCP_SPATIAL_DITHER_MODE_AABBAABB,
+ DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC,
+ DCP_SPATIAL_DITHER_MODE_INVALID
+};
+
+enum dcp_spatial_dither_depth {
+ DCP_SPATIAL_DITHER_DEPTH_30BPP,
+ DCP_SPATIAL_DITHER_DEPTH_24BPP
+};
+
+enum csc_color_mode {
+ /* 00 - BITS2:0 Bypass */
+ CSC_COLOR_MODE_GRAPHICS_BYPASS,
+ /* 01 - hard coded coefficient TV RGB */
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
+ /* 04 - programmable OUTPUT CSC coefficient */
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
+};
+
+enum grph_color_adjust_option {
+ GRPH_COLOR_MATRIX_HW_DEFAULT = 1,
+ GRPH_COLOR_MATRIX_SW
+};
+
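+/*
+ * Output CSC presets, one row per color space. Each row holds the 3x4
+ * matrix {C11..C14, C21..C24, C31..C34}, where every fourth entry is the
+ * offset term; judging by the identity sRGB entry, the coefficients use a
+ * fixed-point format in which 0x2000 represents 1.0.
+ */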
+static const struct out_csc_color_matrix global_color_matrix[] = {
+{ COLOR_SPACE_SRGB,
+ { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+{ COLOR_SPACE_SRGB_LIMITED,
+ { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
+{ COLOR_SPACE_YCBCR601,
+ { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47,
+ 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA,
+ 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+/* TODO: correct values below */
+{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+ 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
+};
+
+static bool setup_scaling_configuration(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
+
+ if (data->taps.h_taps + data->taps.v_taps <= 2) {
+ /* Set bypass */
+ if (xfm_dce->xfm_mask->SCL_PSCL_EN != 0)
+ REG_UPDATE_2(SCL_MODE, SCL_MODE, 0, SCL_PSCL_EN, 0);
+ else
+ REG_UPDATE(SCL_MODE, SCL_MODE, 0);
+ return false;
+ }
+
+ REG_SET_2(SCL_TAP_CONTROL, 0,
+ SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
+ SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
+
+ if (data->format <= PIXEL_FORMAT_GRPH_END)
+ REG_UPDATE(SCL_MODE, SCL_MODE, 1);
+ else
+ REG_UPDATE(SCL_MODE, SCL_MODE, 2);
+
+ if (xfm_dce->xfm_mask->SCL_PSCL_EN != 0)
+ REG_UPDATE(SCL_MODE, SCL_PSCL_EN, 1);
+
+ /* 1 - Replace out-of-bound pixels with the edge pixel */
+ REG_SET(SCL_CONTROL, 0, SCL_BOUNDARY_MODE, 1);
+
+ return true;
+}
+
+static void program_overscan(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ int overscan_right = data->h_active
+ - data->recout.x - data->recout.width;
+ int overscan_bottom = data->v_active
+ - data->recout.y - data->recout.height;
+
+ if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ overscan_bottom += 2;
+ overscan_right += 2;
+ }
+
+ if (overscan_right < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_right = 0;
+ }
+ if (overscan_bottom < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_bottom = 0;
+ }
+
+ REG_SET_2(EXT_OVERSCAN_LEFT_RIGHT, 0,
+ EXT_OVERSCAN_LEFT, data->recout.x,
+ EXT_OVERSCAN_RIGHT, overscan_right);
+ REG_SET_2(EXT_OVERSCAN_TOP_BOTTOM, 0,
+ EXT_OVERSCAN_TOP, data->recout.y,
+ EXT_OVERSCAN_BOTTOM, overscan_bottom);
+}
+
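+/*
+ * Program one coefficient RAM filter. With SCL_PHASES == 16 this writes
+ * N/2 + 1 = 9 phases of (taps + 1) / 2 coefficient pairs; for an odd tap
+ * count the last pair of each phase carries only the even coefficient.
+ */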
+static void program_multi_taps_filter(
+ struct dce_transform *xfm_dce,
+ int taps,
+ const uint16_t *coeffs,
+ enum ram_filter_type filter_type)
+{
+ int phase, pair;
+ int array_idx = 0;
+ int taps_pairs = (taps + 1) / 2;
+ int phases_to_program = SCL_PHASES / 2 + 1;
+
+ uint32_t power_ctl = 0;
+
+ if (!coeffs)
+ return;
+
+ /* We need to disable power gating on coeff memory to do programming */
+ if (REG(DCFE_MEM_PWR_CTRL)) {
+ power_ctl = REG_READ(DCFE_MEM_PWR_CTRL);
+ REG_SET(DCFE_MEM_PWR_CTRL, power_ctl, SCL_COEFF_MEM_PWR_DIS, 1);
+
+ REG_WAIT(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, 0, 1, 10);
+ }
+ for (phase = 0; phase < phases_to_program; phase++) {
+ /* We always program N/2 + 1 phases; of the N total phases the
+ * remaining N/2 - 1 are mirrors. Phase 0 is unique, and phase N/2
+ * is unique when N is even. */
+ for (pair = 0; pair < taps_pairs; pair++) {
+ uint16_t odd_coeff = 0;
+ uint16_t even_coeff = coeffs[array_idx];
+
+ REG_SET_3(SCL_COEF_RAM_SELECT, 0,
+ SCL_C_RAM_FILTER_TYPE, filter_type,
+ SCL_C_RAM_PHASE, phase,
+ SCL_C_RAM_TAP_PAIR_IDX, pair);
+
+ if (taps % 2 && pair == taps_pairs - 1)
+ array_idx++;
+ else {
+ odd_coeff = coeffs[array_idx + 1];
+ array_idx += 2;
+ }
+
+ REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
+ SCL_C_RAM_EVEN_TAP_COEF_EN, 1,
+ SCL_C_RAM_EVEN_TAP_COEF, even_coeff,
+ SCL_C_RAM_ODD_TAP_COEF_EN, 1,
+ SCL_C_RAM_ODD_TAP_COEF, odd_coeff);
+ }
+ }
+
+ /* We need to restore power gating on coeff memory to its initial state */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_WRITE(DCFE_MEM_PWR_CTRL, power_ctl);
+}
+
+static void program_viewport(
+ struct dce_transform *xfm_dce,
+ const struct rect *view_port)
+{
+ REG_SET_2(VIEWPORT_START, 0,
+ VIEWPORT_X_START, view_port->x,
+ VIEWPORT_Y_START, view_port->y);
+
+ REG_SET_2(VIEWPORT_SIZE, 0,
+ VIEWPORT_HEIGHT, view_port->height,
+ VIEWPORT_WIDTH, view_port->width);
+
+ /* TODO: add stereo support */
+}
+
+static void calculate_inits(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data,
+ struct scl_ratios_inits *inits)
+{
+ struct fixed31_32 h_init;
+ struct fixed31_32 v_init;
+
+ inits->h_int_scale_ratio =
+ dal_fixed31_32_u2d19(data->ratios.horz) << 5;
+ inits->v_int_scale_ratio =
+ dal_fixed31_32_u2d19(data->ratios.vert) << 5;
+
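+ /* Filter init = (ratio + taps + 1) / 2: the integer part goes to
+ * SCL_*_INIT_INT and the u0.19 fraction, shifted left by 5 like the
+ * scale ratios above, goes to SCL_*_INIT_FRAC. */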
+ h_init =
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_add(
+ data->ratios.horz,
+ dal_fixed31_32_from_int(data->taps.h_taps + 1)),
+ 2);
+ inits->h_init.integer = dal_fixed31_32_floor(h_init);
+ inits->h_init.fraction = dal_fixed31_32_u0d19(h_init) << 5;
+
+ v_init =
+ dal_fixed31_32_div_int(
+ dal_fixed31_32_add(
+ data->ratios.vert,
+ dal_fixed31_32_from_int(data->taps.v_taps + 1)),
+ 2);
+ inits->v_init.integer = dal_fixed31_32_floor(v_init);
+ inits->v_init.fraction = dal_fixed31_32_u0d19(v_init) << 5;
+}
+
+static void program_scl_ratios_inits(
+ struct dce_transform *xfm_dce,
+ struct scl_ratios_inits *inits)
+{
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+ SCL_H_SCALE_RATIO, inits->h_int_scale_ratio);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+ SCL_V_SCALE_RATIO, inits->v_int_scale_ratio);
+
+ REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
+ SCL_H_INIT_INT, inits->h_init.integer,
+ SCL_H_INIT_FRAC, inits->h_init.fraction);
+
+ REG_SET_2(SCL_VERT_FILTER_INIT, 0,
+ SCL_V_INIT_INT, inits->v_init.integer,
+ SCL_V_INIT_FRAC, inits->v_init.fraction);
+
+ REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+}
+
+static const uint16_t *get_filter_coeffs_16p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 4)
+ return get_filter_4tap_16p(ratio);
+ else if (taps == 3)
+ return get_filter_3tap_16p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_16p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+static void dce_transform_set_scaler(
+ struct transform *xfm,
+ const struct scaler_data *data)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ bool is_scaling_required;
+ bool filter_updated = false;
+ const uint16_t *coeffs_v, *coeffs_h;
+
+ /* Always use all three pieces of memory */
+ REG_SET_2(LB_MEMORY_CTRL, 0,
+ LB_MEMORY_CONFIG, 0,
+ LB_MEMORY_SIZE, xfm_dce->lb_memory_size);
+
+ /* Clear SCL_F_SHARP_CONTROL value to 0 */
+ REG_WRITE(SCL_F_SHARP_CONTROL, 0);
+
+ /* 1. Program overscan */
+ program_overscan(xfm_dce, data);
+
+ /* 2. Program taps and configuration */
+ is_scaling_required = setup_scaling_configuration(xfm_dce, data);
+
+ if (is_scaling_required) {
+ /* 3. Calculate and program ratio, filter initialization */
+ struct scl_ratios_inits inits = { 0 };
+
+ calculate_inits(xfm_dce, data, &inits);
+
+ program_scl_ratios_inits(xfm_dce, &inits);
+
+ coeffs_v = get_filter_coeffs_16p(data->taps.v_taps, data->ratios.vert);
+ coeffs_h = get_filter_coeffs_16p(data->taps.h_taps, data->ratios.horz);
+
+ if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
+ /* 4. Program vertical filters */
+ if (xfm_dce->filter_v == NULL)
+ REG_SET(SCL_VERT_FILTER_CONTROL, 0,
+ SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_RGB_Y_VERTICAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_ALPHA_VERTICAL);
+
+ /* 5. Program horizontal filters */
+ if (xfm_dce->filter_h == NULL)
+ REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
+ SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_RGB_Y_HORIZONTAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_ALPHA_HORIZONTAL);
+
+ xfm_dce->filter_v = coeffs_v;
+ xfm_dce->filter_h = coeffs_h;
+ filter_updated = true;
+ }
+ }
+
+ /* 6. Program the viewport */
+ program_viewport(xfm_dce, &data->viewport);
+
+ /* 7. Set bit to flip to new coefficient memory */
+ if (filter_updated)
+ REG_UPDATE(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, 1);
+
+ REG_UPDATE(LB_DATA_FORMAT, ALPHA_EN, data->lb_params.alpha_en);
+}
+
+/*****************************************************************************
+ * set_clamp
+ *
+ * @param depth : bit depth to set the clamp to (should match denorm)
+ *
+ * @brief
+ * Programs clamp according to panel bit depth.
+ *
+ *******************************************************************************/
+static void set_clamp(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth)
+{
+ int clamp_max = 0;
+
+ /* At the clamp block the data will be MSB aligned, so we set the max
+ * clamp accordingly.
+ * For example, the max value for 6 bits MSB aligned (14 bit bus) would
+ * be "11 1111 0000 0000" in binary, so 0x3F00.
+ */
+ switch (depth) {
+ case COLOR_DEPTH_666:
+ /* 6bit MSB aligned on 14 bit bus '11 1111 0000 0000' */
+ clamp_max = 0x3F00;
+ break;
+ case COLOR_DEPTH_888:
+ /* 8bit MSB aligned on 14 bit bus '11 1111 1100 0000' */
+ clamp_max = 0x3FC0;
+ break;
+ case COLOR_DEPTH_101010:
+ /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+ clamp_max = 0x3FFC;
+ break;
+ case COLOR_DEPTH_121212:
+ /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
+ clamp_max = 0x3FFF;
+ break;
+ default:
+ clamp_max = 0x3FC0;
+ BREAK_TO_DEBUGGER(); /* Invalid clamp bit depth */
+ }
+ REG_SET_2(OUT_CLAMP_CONTROL_B_CB, 0,
+ OUT_CLAMP_MIN_B_CB, 0,
+ OUT_CLAMP_MAX_B_CB, clamp_max);
+
+ REG_SET_2(OUT_CLAMP_CONTROL_G_Y, 0,
+ OUT_CLAMP_MIN_G_Y, 0,
+ OUT_CLAMP_MAX_G_Y, clamp_max);
+
+ REG_SET_2(OUT_CLAMP_CONTROL_R_CR, 0,
+ OUT_CLAMP_MIN_R_CR, 0,
+ OUT_CLAMP_MAX_R_CR, clamp_max);
+}
+
+/*******************************************************************************
+ * set_round
+ *
+ * @brief
+ * Programs Round/Truncate
+ *
+ * @param [in] mode :round or truncate
+ * @param [in] depth :bit depth to round/truncate to
+ OUT_ROUND_TRUNC_MODE 3:0 0xA Output data round or truncate mode
+ POSSIBLE VALUES:
+ 00 - truncate to u0.12
+ 01 - truncate to u0.11
+ 02 - truncate to u0.10
+ 03 - truncate to u0.9
+ 04 - truncate to u0.8
+ 05 - reserved
+ 06 - truncate to u0.14
+ 07 - truncate to u0.13
+ 08 - round to u0.12
+ 09 - round to u0.11
+ 10 - round to u0.10
+ 11 - round to u0.9
+ 12 - round to u0.8
+ 13 - reserved
+ 14 - round to u0.14
+ 15 - round to u0.13
+
+ ******************************************************************************/
+static void set_round(
+ struct dce_transform *xfm_dce,
+ enum dcp_out_trunc_round_mode mode,
+ enum dcp_out_trunc_round_depth depth)
+{
+ int depth_bits = 0;
+ int mode_bit = 0;
+
+ /* set up bit depth */
+ switch (depth) {
+ case DCP_OUT_TRUNC_ROUND_DEPTH_14BIT:
+ depth_bits = 6;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_13BIT:
+ depth_bits = 7;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_12BIT:
+ depth_bits = 0;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_11BIT:
+ depth_bits = 1;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_10BIT:
+ depth_bits = 2;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_9BIT:
+ depth_bits = 3;
+ break;
+ case DCP_OUT_TRUNC_ROUND_DEPTH_8BIT:
+ depth_bits = 4;
+ break;
+ default:
+ depth_bits = 4;
+ BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_depth */
+ }
+
+ /* set up round or truncate */
+ switch (mode) {
+ case DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE:
+ mode_bit = 0;
+ break;
+ case DCP_OUT_TRUNC_ROUND_MODE_ROUND:
+ mode_bit = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER(); /* Invalid dcp_out_trunc_round_mode */
+ }
+
+ depth_bits |= mode_bit << 3;
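+ /* e.g. "round to u0.10" encodes as (1 << 3) | 2 = 0xA, the default
+ * documented for OUT_ROUND_TRUNC_MODE above */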
+
+ REG_SET(OUT_ROUND_CONTROL, 0, OUT_ROUND_TRUNC_MODE, depth_bits);
+}
+
+/*****************************************************************************
+ * set_dither
+ *
+ * @brief
+ * Programs Dither
+ *
+ * @param [in] dither_enable : enable dither
+ * @param [in] dither_mode : dither mode to set
+ * @param [in] dither_depth : bit depth to dither to
+ * @param [in] frame_random_enable : enable frame random
+ * @param [in] rgb_random_enable : enable rgb random
+ * @param [in] highpass_random_enable : enable highpass random
+ *
+ ******************************************************************************/
+
+static void set_dither(
+ struct dce_transform *xfm_dce,
+ bool dither_enable,
+ enum dcp_spatial_dither_mode dither_mode,
+ enum dcp_spatial_dither_depth dither_depth,
+ bool frame_random_enable,
+ bool rgb_random_enable,
+ bool highpass_random_enable)
+{
+ int dither_depth_bits = 0;
+ int dither_mode_bits = 0;
+
+ switch (dither_mode) {
+ case DCP_SPATIAL_DITHER_MODE_AAAA:
+ dither_mode_bits = 0;
+ break;
+ case DCP_SPATIAL_DITHER_MODE_A_AA_A:
+ dither_mode_bits = 1;
+ break;
+ case DCP_SPATIAL_DITHER_MODE_AABBAABB:
+ dither_mode_bits = 2;
+ break;
+ case DCP_SPATIAL_DITHER_MODE_AABBCCAABBCC:
+ dither_mode_bits = 3;
+ break;
+ default:
+ /* Invalid dcp_spatial_dither_mode */
+ BREAK_TO_DEBUGGER();
+ }
+
+ switch (dither_depth) {
+ case DCP_SPATIAL_DITHER_DEPTH_30BPP:
+ dither_depth_bits = 0;
+ break;
+ case DCP_SPATIAL_DITHER_DEPTH_24BPP:
+ dither_depth_bits = 1;
+ break;
+ default:
+ /* Invalid dcp_spatial_dither_depth */
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* write the register */
+ REG_SET_6(DCP_SPATIAL_DITHER_CNTL, 0,
+ DCP_SPATIAL_DITHER_EN, dither_enable,
+ DCP_SPATIAL_DITHER_MODE, dither_mode_bits,
+ DCP_SPATIAL_DITHER_DEPTH, dither_depth_bits,
+ DCP_FRAME_RANDOM_ENABLE, frame_random_enable,
+ DCP_RGB_RANDOM_ENABLE, rgb_random_enable,
+ DCP_HIGHPASS_RANDOM_ENABLE, highpass_random_enable);
+}
+
+/*****************************************************************************
+ * program_bit_depth_reduction
+ *
+ * @brief
+ * Programs the DCP bit depth reduction registers (Clamp, Round/Truncate,
+ * Dither) for dce
+ *
+ * @param depth : bit depth to set the clamp to (should match denorm)
+ *
+ ******************************************************************************/
+static void program_bit_depth_reduction(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ enum dcp_bit_depth_reduction_mode depth_reduction_mode;
+ enum dcp_spatial_dither_mode spatial_dither_mode;
+ bool frame_random_enable;
+ bool rgb_random_enable;
+ bool highpass_random_enable;
+
+ ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
+
+ if (bit_depth_params->flags.SPATIAL_DITHER_ENABLED) {
+ depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DITHER;
+ frame_random_enable = true;
+ rgb_random_enable = true;
+ highpass_random_enable = true;
+
+ } else {
+ depth_reduction_mode = DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED;
+ frame_random_enable = false;
+ rgb_random_enable = false;
+ highpass_random_enable = false;
+ }
+
+ spatial_dither_mode = DCP_SPATIAL_DITHER_MODE_A_AA_A;
+
+ set_clamp(xfm_dce, depth);
+
+ switch (depth_reduction_mode) {
+ case DCP_BIT_DEPTH_REDUCTION_MODE_DITHER:
+ /* Spatial Dither: Set round/truncate to bypass (12bit),
+ * enable Dither (30bpp) */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
+
+ set_dither(xfm_dce, true, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+ case DCP_BIT_DEPTH_REDUCTION_MODE_ROUND:
+ /* Round: Enable round (10bit), disable Dither */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_ROUND,
+ DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
+
+ set_dither(xfm_dce, false, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+ case DCP_BIT_DEPTH_REDUCTION_MODE_TRUNCATE: /* Truncate */
+ /* Truncate: Enable truncate (10bit), disable Dither */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_DEPTH_10BIT);
+
+ set_dither(xfm_dce, false, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+
+ case DCP_BIT_DEPTH_REDUCTION_MODE_DISABLED: /* Disabled */
+ /* Disabled: Set round/truncate to bypass (12bit),
+ * disable Dither */
+ set_round(xfm_dce,
+ DCP_OUT_TRUNC_ROUND_MODE_TRUNCATE,
+ DCP_OUT_TRUNC_ROUND_DEPTH_12BIT);
+
+ set_dither(xfm_dce, false, spatial_dither_mode,
+ DCP_SPATIAL_DITHER_DEPTH_30BPP, frame_random_enable,
+ rgb_random_enable, highpass_random_enable);
+ break;
+ default:
+ /* Invalid DCP Depth reduction mode */
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
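+/*
+ * Example with the defaults from dce_transform.h: at 30bpp a 144-bit LB
+ * entry holds 4 whole pixels, so 1712 entries cover 4 * 1712 = 6848
+ * pixels, i.e. 3 full lines of a 1920-pixel-wide source.
+ */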
+static int dce_transform_get_max_num_of_supported_lines(
+ struct dce_transform *xfm_dce,
+ enum lb_pixel_depth depth,
+ int pixel_width)
+{
+ int pixels_per_entries = 0;
+ int max_pixels_supports = 0;
+
+ ASSERT(pixel_width);
+
+ /* Find number of pixels that can fit into a single LB entry and
+ * take floor of the value since we cannot store a single pixel
+ * across multiple entries. */
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 18;
+ break;
+
+ case LB_PIXEL_DEPTH_24BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 24;
+ break;
+
+ case LB_PIXEL_DEPTH_30BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 30;
+ break;
+
+ case LB_PIXEL_DEPTH_36BPP:
+ pixels_per_entries = xfm_dce->lb_bits_per_entry / 36;
+ break;
+
+ default:
+ dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LB pixel depth",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ ASSERT(pixels_per_entries);
+
+ max_pixels_supports =
+ pixels_per_entries *
+ xfm_dce->lb_memory_size;
+
+ return (max_pixels_supports / pixel_width);
+}
+
+static void set_denormalization(
+ struct dce_transform *xfm_dce,
+ enum dc_color_depth depth)
+{
+ int denorm_mode = 0;
+
+ switch (depth) {
+ case COLOR_DEPTH_666:
+ /* 63/64 for 6 bit output color depth */
+ denorm_mode = 1;
+ break;
+ case COLOR_DEPTH_888:
+ /* Unity for 8 bit output color depth
+ * because prescale is disabled by default */
+ denorm_mode = 0;
+ break;
+ case COLOR_DEPTH_101010:
+ /* 1023/1024 for 10 bit output color depth */
+ denorm_mode = 3;
+ break;
+ case COLOR_DEPTH_121212:
+ /* 4095/4096 for 12 bit output color depth */
+ denorm_mode = 5;
+ break;
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* not a valid use case */
+ break;
+ }
+
+ REG_SET(DENORM_CONTROL, 0, DENORM_MODE, denorm_mode);
+}
+
+static void dce_transform_set_pixel_storage_depth(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_depth, expan_mode;
+ enum dc_color_depth color_depth;
+
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ color_depth = COLOR_DEPTH_666;
+ pixel_depth = 2;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_24BPP:
+ color_depth = COLOR_DEPTH_888;
+ pixel_depth = 1;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_30BPP:
+ color_depth = COLOR_DEPTH_101010;
+ pixel_depth = 0;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_36BPP:
+ color_depth = COLOR_DEPTH_121212;
+ pixel_depth = 3;
+ expan_mode = 0;
+ break;
+ default:
+ color_depth = COLOR_DEPTH_101010;
+ pixel_depth = 0;
+ expan_mode = 1;
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ set_denormalization(xfm_dce, color_depth);
+ program_bit_depth_reduction(xfm_dce, color_depth, bit_depth_params);
+
+ REG_UPDATE_2(LB_DATA_FORMAT,
+ PIXEL_DEPTH, pixel_depth,
+ PIXEL_EXPAN_MODE, expan_mode);
+
+ if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+ /* We should not be using an unsupported capability
+ * unless a workaround requires it */
+ dm_logger_write(xfm->ctx->logger, LOG_WARNING,
+ "%s: Capability not supported",
+ __func__);
+ }
+}
+
+static void program_gamut_remap(
+ struct dce_transform *xfm_dce,
+ const uint16_t *reg_val)
+{
+ if (reg_val) {
+ REG_SET_2(GAMUT_REMAP_C11_C12, 0,
+ GAMUT_REMAP_C11, reg_val[0],
+ GAMUT_REMAP_C12, reg_val[1]);
+ REG_SET_2(GAMUT_REMAP_C13_C14, 0,
+ GAMUT_REMAP_C13, reg_val[2],
+ GAMUT_REMAP_C14, reg_val[3]);
+ REG_SET_2(GAMUT_REMAP_C21_C22, 0,
+ GAMUT_REMAP_C21, reg_val[4],
+ GAMUT_REMAP_C22, reg_val[5]);
+ REG_SET_2(GAMUT_REMAP_C23_C24, 0,
+ GAMUT_REMAP_C23, reg_val[6],
+ GAMUT_REMAP_C24, reg_val[7]);
+ REG_SET_2(GAMUT_REMAP_C31_C32, 0,
+ GAMUT_REMAP_C31, reg_val[8],
+ GAMUT_REMAP_C32, reg_val[9]);
+ REG_SET_2(GAMUT_REMAP_C33_C34, 0,
+ GAMUT_REMAP_C33, reg_val[10],
+ GAMUT_REMAP_C34, reg_val[11]);
+
+ REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 1);
+ } else
+ REG_SET(GAMUT_REMAP_CONTROL, 0, GRPH_GAMUT_REMAP_MODE, 0);
+
+}
+
+/**
+ *****************************************************************************
+ * Function: dce_transform_set_gamut_remap
+ *
+ * @param [in] const struct xfm_grph_csc_adjustment *adjust
+ *
+ * @return
+ * void
+ *
+ * @note Calculates and applies the color temperature adjustment in the
+ * RGB color space
+ *
+ * @see
+ *
+ *****************************************************************************
+ */
+static void dce_transform_set_gamut_remap(
+ struct transform *xfm,
+ const struct xfm_grph_csc_adjustment *adjust)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
+ /* Bypass if type is bypass or hw */
+ program_gamut_remap(xfm_dce, NULL);
+ else {
+ struct fixed31_32 arr_matrix[GAMUT_MATRIX_SIZE];
+ uint16_t arr_reg_val[GAMUT_MATRIX_SIZE];
+
+ arr_matrix[0] = adjust->temperature_matrix[0];
+ arr_matrix[1] = adjust->temperature_matrix[1];
+ arr_matrix[2] = adjust->temperature_matrix[2];
+ arr_matrix[3] = dal_fixed31_32_zero;
+
+ arr_matrix[4] = adjust->temperature_matrix[3];
+ arr_matrix[5] = adjust->temperature_matrix[4];
+ arr_matrix[6] = adjust->temperature_matrix[5];
+ arr_matrix[7] = dal_fixed31_32_zero;
+
+ arr_matrix[8] = adjust->temperature_matrix[6];
+ arr_matrix[9] = adjust->temperature_matrix[7];
+ arr_matrix[10] = adjust->temperature_matrix[8];
+ arr_matrix[11] = dal_fixed31_32_zero;
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, GAMUT_MATRIX_SIZE);
+
+ program_gamut_remap(xfm_dce, arr_reg_val);
+ }
+}
+
+static uint32_t decide_taps(struct fixed31_32 ratio, uint32_t in_taps, bool chroma)
+{
+ uint32_t taps;
+
+ if (IDENTITY_RATIO(ratio)) {
+ return 1;
+ } else if (in_taps != 0) {
+ taps = in_taps;
+ } else {
+ taps = 4;
+ }
+
+ if (chroma) {
+ taps /= 2;
+ if (taps < 2)
+ taps = 2;
+ }
+
+ return taps;
+}
+
+
+bool dce_transform_get_optimal_number_of_taps(
+ struct transform *xfm,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_width = scl_data->viewport.width;
+ int max_num_of_lines;
+
+ if (xfm_dce->prescaler_on &&
+ (scl_data->viewport.width > scl_data->recout.width))
+ pixel_width = scl_data->recout.width;
+
+ max_num_of_lines = dce_transform_get_max_num_of_supported_lines(
+ xfm_dce,
+ scl_data->lb_params.depth,
+ pixel_width);
+
+ /* Fail if in_taps are impossible */
+ if (in_taps->v_taps >= max_num_of_lines)
+ return false;
+
+ /*
+ * Set taps according to this policy (in this order)
+ * - Use 1 for no scaling
+ * - Use input taps
+ * - Use 4 and reduce as required by line buffer size
+ * - Decide chroma taps if chroma is scaled
+ *
+ * Ignore the input chroma taps; derive the chroma taps from the
+ * non-chroma values.
+ */
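+ /* e.g. an identity ratio yields 1 tap regardless of in_taps; scaling
+ * with in_taps == 0 yields 4 luma taps and 2 chroma taps */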
+ scl_data->taps.h_taps = decide_taps(scl_data->ratios.horz, in_taps->h_taps, false);
+ scl_data->taps.v_taps = decide_taps(scl_data->ratios.vert, in_taps->v_taps, false);
+ scl_data->taps.h_taps_c = decide_taps(scl_data->ratios.horz_c, in_taps->h_taps, true);
+ scl_data->taps.v_taps_c = decide_taps(scl_data->ratios.vert_c, in_taps->v_taps, true);
+
+ if (!IDENTITY_RATIO(scl_data->ratios.vert)) {
+ /* reduce v_taps if needed but ensure we have at least two */
+ if (in_taps->v_taps == 0
+ && max_num_of_lines <= scl_data->taps.v_taps
+ && scl_data->taps.v_taps > 1) {
+ scl_data->taps.v_taps = max_num_of_lines - 1;
+ }
+
+ if (scl_data->taps.v_taps <= 1)
+ return false;
+ }
+
+ if (!IDENTITY_RATIO(scl_data->ratios.vert_c)) {
+ /* reduce chroma v_taps if needed but ensure we have at least two */
+ if (max_num_of_lines <= scl_data->taps.v_taps_c && scl_data->taps.v_taps_c > 1) {
+ scl_data->taps.v_taps_c = max_num_of_lines - 1;
+ }
+
+ if (scl_data->taps.v_taps_c <= 1)
+ return false;
+ }
+
+ /* we've got valid taps */
+ return true;
+}
+
+static void dce_transform_reset(struct transform *xfm)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ xfm_dce->filter_h = NULL;
+ xfm_dce->filter_v = NULL;
+}
+
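+/*
+ * Note: only the OUTPUT_CSC_C11/C12 fields exist in the register field
+ * list, so the same shift/mask pair is reused for every coefficient-pair
+ * register below; this assumes all six registers share one field layout.
+ */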
+static void program_color_matrix(
+ struct dce_transform *xfm_dce,
+ const struct out_csc_color_matrix *tbl_entry,
+ enum grph_color_adjust_option options)
+{
+ {
+ REG_SET_2(OUTPUT_CSC_C11_C12, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[0],
+ OUTPUT_CSC_C12, tbl_entry->regval[1]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C13_C14, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[2],
+ OUTPUT_CSC_C12, tbl_entry->regval[3]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C21_C22, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[4],
+ OUTPUT_CSC_C12, tbl_entry->regval[5]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C23_C24, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[6],
+ OUTPUT_CSC_C12, tbl_entry->regval[7]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C31_C32, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[8],
+ OUTPUT_CSC_C12, tbl_entry->regval[9]);
+ }
+ {
+ REG_SET_2(OUTPUT_CSC_C33_C34, 0,
+ OUTPUT_CSC_C11, tbl_entry->regval[10],
+ OUTPUT_CSC_C12, tbl_entry->regval[11]);
+ }
+}
+
+static bool configure_graphics_mode(
+ struct dce_transform *xfm_dce,
+ enum csc_color_mode config,
+ enum graphics_csc_adjust_type csc_adjust_type,
+ enum dc_color_space color_space)
+{
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+
+ if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) {
+ if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC) {
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 4);
+ } else {
+
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* bypass */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* TV RGB */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 1);
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 2);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 3);
+ break;
+ default:
+ return false;
+ }
+ }
+ } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) {
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* bypass */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* TV RGB */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 1);
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 2);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 3);
+ break;
+ default:
+ return false;
+ }
+
+ } else
+ /* bypass */
+ REG_SET(OUTPUT_CSC_CONTROL, 0,
+ OUTPUT_CSC_GRPH_MODE, 0);
+
+ return true;
+}
+
+void dce110_opp_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+
+ program_color_matrix(
+ xfm_dce, tbl_entry, GRAPHICS_CSC_ADJUST_TYPE_SW);
+
+ /* We did everything; now program DxOUTPUT_CSC_CONTROL */
+ configure_graphics_mode(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
+ tbl_entry->color_space);
+}
+
+void dce110_opp_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED;
+
+ if (default_adjust->force_hw_default == false) {
+ const struct out_csc_color_matrix *elm;
+ /* this parameter is currently not in use */
+ enum grph_color_adjust_option option =
+ GRPH_COLOR_MATRIX_HW_DEFAULT;
+ uint32_t i;
+ /*
+ * HW default false: program the locally defined matrix.
+ * HW default true: use the predefined HW matrix; the matrix does
+ * not need to be programmed.
+ * The OEM requests the HW default via a runtime parameter.
+ */
+ option = GRPH_COLOR_MATRIX_SW;
+
+ for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) {
+ elm = &global_color_matrix[i];
+ if (elm->color_space != default_adjust->out_color_space)
+ continue;
+ /* program the matrix with default values from this
+ * file */
+ program_color_matrix(xfm_dce, elm, option);
+ config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+ break;
+ }
+ }
+
+ /* Configure what we programmed:
+ * 1. Default values from this file
+ * 2. Hardware default from ROM_A, in which case the matrix does not
+ * need to be programmed */
+
+ configure_graphics_mode(xfm_dce, config,
+ default_adjust->csc_adjust_type,
+ default_adjust->out_color_space);
+}
+
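+/*
+ * Write the regamma PWL into the DCP LUT: take the LUT out of power
+ * gating (or light sleep on families without DCFE_MEM_PWR_CTRL), set the
+ * write-enable mask to 0x7, then write each hardware point as six words:
+ * base R/G/B followed by delta R/G/B.
+ */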
+static void program_pwl(
+ struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
+{
+ uint32_t value;
+ int retval;
+
+ {
+ uint8_t max_tries = 10;
+ uint8_t counter = 0;
+
+ /* Power on LUT memory */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, 1);
+ else
+ REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, 1);
+
+ while (counter < max_tries) {
+ if (REG(DCFE_MEM_PWR_STATUS)) {
+ value = REG_READ(DCFE_MEM_PWR_STATUS);
+ REG_GET(DCFE_MEM_PWR_STATUS,
+ DCP_REGAMMA_MEM_PWR_STATE,
+ &retval);
+
+ if (retval == 0)
+ break;
+ ++counter;
+ } else {
+ value = REG_READ(DCFE_MEM_LIGHT_SLEEP_CNTL);
+ REG_GET(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_MEM_PWR_STATE,
+ &retval);
+
+ if (retval == 0)
+ break;
+ ++counter;
+ }
+ }
+
+ if (counter == max_tries) {
+ dm_logger_write(xfm_dce->base.ctx->logger, LOG_WARNING,
+ "%s: regamma lut was not powered on "
+ "in a timely manner,"
+ " programming still proceeds\n",
+ __func__);
+ }
+ }
+
+ REG_UPDATE(REGAMMA_LUT_WRITE_EN_MASK,
+ REGAMMA_LUT_WRITE_EN_MASK, 7);
+
+ REG_WRITE(REGAMMA_LUT_INDEX, 0);
+
+ /* Program REGAMMA_LUT_DATA */
+ {
+ uint32_t i = 0;
+ const struct pwl_result_data *rgb = params->rgb_resulted;
+
+ while (i != params->hw_points_num) {
+
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->red_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->green_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->blue_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_red_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_green_reg);
+ REG_WRITE(REGAMMA_LUT_DATA, rgb->delta_blue_reg);
+
+ ++rgb;
+ ++i;
+ }
+ }
+
+ /* we are done with DCP LUT memory; re-enable low power mode */
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, 0);
+ else
+ REG_UPDATE(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, 0);
+}
+
+static void regamma_config_regions_and_segments(
+ struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+
+ {
+ REG_SET_2(REGAMMA_CNTLA_START_CNTL, 0,
+ REGAMMA_CNTLA_EXP_REGION_START, params->arr_points[0].custom_float_x,
+ REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, 0);
+ }
+ {
+ REG_SET(REGAMMA_CNTLA_SLOPE_CNTL, 0,
+ REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, params->arr_points[0].custom_float_slope);
+
+ }
+ {
+ REG_SET(REGAMMA_CNTLA_END_CNTL1, 0,
+ REGAMMA_CNTLA_EXP_REGION_END, params->arr_points[1].custom_float_x);
+ }
+ {
+ REG_SET_2(REGAMMA_CNTLA_END_CNTL2, 0,
+ REGAMMA_CNTLA_EXP_REGION_END_BASE, params->arr_points[1].custom_float_y,
+ REGAMMA_CNTLA_EXP_REGION_END_SLOPE, params->arr_points[2].custom_float_slope);
+ }
+
+ curve = params->arr_curve_points;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_0_1, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_2_3, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_4_5, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_6_7, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_8_9, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_10_11, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_12_13, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+
+ }
+
+ curve += 2;
+
+ {
+ REG_SET_4(REGAMMA_CNTLA_REGION_14_15, 0,
+ REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, curve[0].offset,
+ REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num,
+ REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, curve[1].offset,
+ REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num);
+ }
+}
+
+
+
+void dce110_opp_program_regamma_pwl(
+ struct transform *xfm,
+ const struct pwl_params *params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ /* Setup regions */
+ regamma_config_regions_and_segments(xfm_dce, params);
+
+ /* Program PWL */
+ program_pwl(xfm_dce, params);
+}
+
+void dce110_opp_power_on_regamma_lut(
+ struct transform *xfm,
+ bool power_on)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ if (REG(DCFE_MEM_PWR_CTRL))
+ REG_UPDATE_2(DCFE_MEM_PWR_CTRL,
+ DCP_REGAMMA_MEM_PWR_DIS, power_on,
+ DCP_LUT_MEM_PWR_DIS, power_on);
+ else
+ REG_UPDATE_2(DCFE_MEM_LIGHT_SLEEP_CNTL,
+ REGAMMA_LUT_LIGHT_SLEEP_DIS, power_on,
+ DCP_LUT_LIGHT_SLEEP_DIS, power_on);
+
+}
+
+void dce110_opp_set_regamma_mode(struct transform *xfm,
+ enum opp_regamma mode)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ REG_SET(REGAMMA_CONTROL, 0,
+ GRPH_REGAMMA_MODE, mode);
+}
+
+static const struct transform_funcs dce_transform_funcs = {
+ .transform_reset = dce_transform_reset,
+ .transform_set_scaler =
+ dce_transform_set_scaler,
+ .transform_set_gamut_remap =
+ dce_transform_set_gamut_remap,
+ .opp_set_csc_adjustment = dce110_opp_set_csc_adjustment,
+ .opp_set_csc_default = dce110_opp_set_csc_default,
+ .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut,
+ .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl,
+ .opp_set_regamma_mode = dce110_opp_set_regamma_mode,
+ .transform_set_pixel_storage_depth =
+ dce_transform_set_pixel_storage_depth,
+ .transform_get_optimal_number_of_taps =
+ dce_transform_get_optimal_number_of_taps
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dce_transform_construct(
+ struct dce_transform *xfm_dce,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_transform_registers *regs,
+ const struct dce_transform_shift *xfm_shift,
+ const struct dce_transform_mask *xfm_mask)
+{
+ xfm_dce->base.ctx = ctx;
+
+ xfm_dce->base.inst = inst;
+ xfm_dce->base.funcs = &dce_transform_funcs;
+
+ xfm_dce->regs = regs;
+ xfm_dce->xfm_shift = xfm_shift;
+ xfm_dce->xfm_mask = xfm_mask;
+
+ xfm_dce->prescaler_on = true;
+ xfm_dce->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
new file mode 100644
index 000000000000..bfc94b4927b9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCE_DCE_TRANSFORM_H_
+#define _DCE_DCE_TRANSFORM_H_
+
+
+#include "transform.h"
+
+#define TO_DCE_TRANSFORM(transform)\
+ container_of(transform, struct dce_transform, base)
+
+#define LB_TOTAL_NUMBER_OF_ENTRIES 1712
+#define LB_BITS_PER_ENTRY 144
+
+#define XFM_COMMON_REG_LIST_DCE_BASE(id) \
+ SRI(LB_DATA_FORMAT, LB, id), \
+ SRI(GAMUT_REMAP_CONTROL, DCP, id), \
+ SRI(GAMUT_REMAP_C11_C12, DCP, id), \
+ SRI(GAMUT_REMAP_C13_C14, DCP, id), \
+ SRI(GAMUT_REMAP_C21_C22, DCP, id), \
+ SRI(GAMUT_REMAP_C23_C24, DCP, id), \
+ SRI(GAMUT_REMAP_C31_C32, DCP, id), \
+ SRI(GAMUT_REMAP_C33_C34, DCP, id), \
+ SRI(OUTPUT_CSC_C11_C12, DCP, id), \
+ SRI(OUTPUT_CSC_C13_C14, DCP, id), \
+ SRI(OUTPUT_CSC_C21_C22, DCP, id), \
+ SRI(OUTPUT_CSC_C23_C24, DCP, id), \
+ SRI(OUTPUT_CSC_C31_C32, DCP, id), \
+ SRI(OUTPUT_CSC_C33_C34, DCP, id), \
+ SRI(OUTPUT_CSC_CONTROL, DCP, id), \
+ SRI(REGAMMA_CNTLA_START_CNTL, DCP, id), \
+ SRI(REGAMMA_CNTLA_SLOPE_CNTL, DCP, id), \
+ SRI(REGAMMA_CNTLA_END_CNTL1, DCP, id), \
+ SRI(REGAMMA_CNTLA_END_CNTL2, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_0_1, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_2_3, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_4_5, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_6_7, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_8_9, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_10_11, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_12_13, DCP, id), \
+ SRI(REGAMMA_CNTLA_REGION_14_15, DCP, id), \
+ SRI(REGAMMA_LUT_WRITE_EN_MASK, DCP, id), \
+ SRI(REGAMMA_LUT_INDEX, DCP, id), \
+ SRI(REGAMMA_LUT_DATA, DCP, id), \
+ SRI(REGAMMA_CONTROL, DCP, id), \
+ SRI(DENORM_CONTROL, DCP, id), \
+ SRI(DCP_SPATIAL_DITHER_CNTL, DCP, id), \
+ SRI(OUT_ROUND_CONTROL, DCP, id), \
+ SRI(OUT_CLAMP_CONTROL_R_CR, DCP, id), \
+ SRI(OUT_CLAMP_CONTROL_G_Y, DCP, id), \
+ SRI(OUT_CLAMP_CONTROL_B_CB, DCP, id), \
+ SRI(SCL_MODE, SCL, id), \
+ SRI(SCL_TAP_CONTROL, SCL, id), \
+ SRI(SCL_CONTROL, SCL, id), \
+ SRI(SCL_BYPASS_CONTROL, SCL, id), \
+ SRI(EXT_OVERSCAN_LEFT_RIGHT, SCL, id), \
+ SRI(EXT_OVERSCAN_TOP_BOTTOM, SCL, id), \
+ SRI(SCL_VERT_FILTER_CONTROL, SCL, id), \
+ SRI(SCL_HORZ_FILTER_CONTROL, SCL, id), \
+ SRI(SCL_COEF_RAM_SELECT, SCL, id), \
+ SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
+ SRI(VIEWPORT_START, SCL, id), \
+ SRI(VIEWPORT_SIZE, SCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT, SCL, id), \
+ SRI(SCL_VERT_FILTER_INIT, SCL, id), \
+ SRI(SCL_AUTOMATIC_MODE_CONTROL, SCL, id), \
+ SRI(LB_MEMORY_CTRL, LB, id), \
+ SRI(SCL_UPDATE, SCL, id), \
+ SRI(SCL_F_SHARP_CONTROL, SCL, id)
+
+#define XFM_COMMON_REG_LIST_DCE80(id) \
+ XFM_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_LIGHT_SLEEP_CNTL, CRTC, id)
+
+#define XFM_COMMON_REG_LIST_DCE100(id) \
+ XFM_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, CRTC, id), \
+ SRI(DCFE_MEM_PWR_STATUS, CRTC, id)
+
+#define XFM_COMMON_REG_LIST_DCE110(id) \
+ XFM_COMMON_REG_LIST_DCE_BASE(id), \
+ SRI(DCFE_MEM_PWR_CTRL, DCFE, id), \
+ SRI(DCFE_MEM_PWR_STATUS, DCFE, id)
+
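+/*
+ * XFM_SF(REG, FIELD, post_fix) initializes .FIELD from the generated
+ * REG__FIELD##post_fix constant, so one list builds both the shift and
+ * mask tables (typically __SHIFT and _MASK).
+ */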
+#define XFM_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
+ XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MIN_G_Y, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MAX_G_Y, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MIN_R_CR, mask_sh), \
+ XFM_SF(OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MAX_R_CR, mask_sh), \
+ XFM_SF(OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DENORM_CONTROL, DENORM_MODE, mask_sh), \
+ XFM_SF(LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh), \
+ XFM_SF(LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \
+ XFM_SF(GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \
+ XFM_SF(GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \
+ XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C11, mask_sh),\
+ XFM_SF(OUTPUT_CSC_C11_C12, OUTPUT_CSC_C12, mask_sh),\
+ XFM_SF(OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_SLOPE_CNTL, REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL1, REGAMMA_CNTLA_EXP_REGION_END, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_BASE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_SLOPE, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ XFM_SF(REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(REGAMMA_LUT_WRITE_EN_MASK, REGAMMA_LUT_WRITE_EN_MASK, mask_sh),\
+ XFM_SF(REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
+ XFM_SF(SCL_MODE, SCL_MODE, mask_sh), \
+ XFM_SF(SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh), \
+ XFM_SF(SCL_BYPASS_CONTROL, SCL_BYPASS_MODE, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \
+ XFM_SF(EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \
+ XFM_SF(VIEWPORT_START, VIEWPORT_X_START, mask_sh), \
+ XFM_SF(VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \
+ XFM_SF(VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \
+ XFM_SF(VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \
+ XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mask_sh), \
+ XFM_SF(LB_MEMORY_CTRL, LB_MEMORY_SIZE, mask_sh), \
+ XFM_SF(SCL_VERT_FILTER_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL_HORZ_FILTER_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, mask_sh), \
+ XFM_SF(LB_DATA_FORMAT, ALPHA_EN, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_DCE80(mask_sh) \
+ XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_LIGHT_SLEEP_DIS, mask_sh),\
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, DCP_LUT_LIGHT_SLEEP_DIS, mask_sh),\
+ OPP_SF(DCFE_MEM_LIGHT_SLEEP_CNTL, REGAMMA_LUT_MEM_PWR_STATE, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_DCE110(mask_sh) \
+ XFM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh), \
+ XFM_SF(DCFE_MEM_PWR_CTRL, SCL_COEFF_MEM_PWR_DIS, mask_sh), \
+ XFM_SF(DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, mask_sh), \
+ XFM_SF(DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE_MEM_PWR_STATUS, DCP_REGAMMA_MEM_PWR_STATE, mask_sh),\
+ XFM_SF(SCL_MODE, SCL_PSCL_EN, mask_sh)
+
+#define XFM_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MIN_B_CB, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_B_CB, OUT_CLAMP_MAX_B_CB, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MIN_G_Y, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_G_Y, OUT_CLAMP_MAX_G_Y, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MIN_R_CR, mask_sh), \
+ XFM_SF(DCP0_OUT_CLAMP_CONTROL_R_CR, OUT_CLAMP_MAX_R_CR, mask_sh), \
+ XFM_SF(DCP0_OUT_ROUND_CONTROL, OUT_ROUND_TRUNC_MODE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_EN, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_MODE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_SPATIAL_DITHER_DEPTH, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_FRAME_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_RGB_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP0_DCP_SPATIAL_DITHER_CNTL, DCP_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ XFM_SF(DCP0_DENORM_CONTROL, DENORM_MODE, mask_sh), \
+ XFM_SF(LB0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh), \
+ XFM_SF(LB0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C11_C12, GAMUT_REMAP_C11, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C11_C12, GAMUT_REMAP_C12, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C13_C14, GAMUT_REMAP_C13, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C13_C14, GAMUT_REMAP_C14, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C21_C22, GAMUT_REMAP_C21, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C21_C22, GAMUT_REMAP_C22, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C23_C24, GAMUT_REMAP_C23, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C23_C24, GAMUT_REMAP_C24, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C31_C32, GAMUT_REMAP_C31, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C31_C32, GAMUT_REMAP_C32, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C33_C34, GAMUT_REMAP_C33, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_C33_C34, GAMUT_REMAP_C34, mask_sh), \
+ XFM_SF(DCP0_GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, mask_sh), \
+ XFM_SF(DCP0_OUTPUT_CSC_C11_C12, OUTPUT_CSC_C11, mask_sh),\
+ XFM_SF(DCP0_OUTPUT_CSC_C11_C12, OUTPUT_CSC_C12, mask_sh),\
+ XFM_SF(DCP0_OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_START_CNTL, REGAMMA_CNTLA_EXP_REGION_START_SEGMENT, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_SLOPE_CNTL, REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_END_CNTL1, REGAMMA_CNTLA_EXP_REGION_END, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_BASE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_END_CNTL2, REGAMMA_CNTLA_EXP_REGION_END_SLOPE, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CNTLA_REGION_0_1, REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\
+ XFM_SF(DCP0_REGAMMA_CONTROL, GRPH_REGAMMA_MODE, mask_sh),\
+ XFM_SF(SCL0_SCL_MODE, SCL_MODE, mask_sh), \
+ XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_H_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL0_SCL_TAP_CONTROL, SCL_V_NUM_OF_TAPS, mask_sh), \
+ XFM_SF(SCL0_SCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh), \
+ XFM_SF(SCL0_SCL_BYPASS_CONTROL, SCL_BYPASS_MODE, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh), \
+ XFM_SF(SCL0_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_SELECT, SCL_C_RAM_PHASE, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_EVEN_TAP_COEF, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_COEF_RAM_TAP_DATA, SCL_C_RAM_ODD_TAP_COEF, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_START, VIEWPORT_X_START, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_START, VIEWPORT_Y_START, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT, mask_sh), \
+ XFM_SF(SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh), \
+ XFM_SF(LB0_LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mask_sh), \
+ XFM_SF(LB0_LB_MEMORY_CTRL, LB_MEMORY_SIZE, mask_sh), \
+ XFM_SF(SCL0_SCL_VERT_FILTER_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_HORZ_FILTER_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh), \
+ XFM_SF(SCL0_SCL_UPDATE, SCL_COEF_UPDATE_COMPLETE, mask_sh), \
+ XFM_SF(LB0_LB_DATA_FORMAT, ALPHA_EN, mask_sh), \
+ XFM_SF(DCFE0_DCFE_MEM_PWR_CTRL, SCL_COEFF_MEM_PWR_DIS, mask_sh), \
+ XFM_SF(DCFE0_DCFE_MEM_PWR_CTRL, DCP_REGAMMA_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE0_DCFE_MEM_PWR_CTRL, DCP_LUT_MEM_PWR_DIS, mask_sh),\
+ XFM_SF(DCFE0_DCFE_MEM_PWR_STATUS, SCL_COEFF_MEM_PWR_STATE, mask_sh), \
+ XFM_SF(SCL0_SCL_MODE, SCL_PSCL_EN, mask_sh)
+
+#define XFM_REG_FIELD_LIST(type) \
+ type OUT_CLAMP_MIN_B_CB; \
+ type OUT_CLAMP_MAX_B_CB; \
+ type OUT_CLAMP_MIN_G_Y; \
+ type OUT_CLAMP_MAX_G_Y; \
+ type OUT_CLAMP_MIN_R_CR; \
+ type OUT_CLAMP_MAX_R_CR; \
+ type OUT_ROUND_TRUNC_MODE; \
+ type DCP_SPATIAL_DITHER_EN; \
+ type DCP_SPATIAL_DITHER_MODE; \
+ type DCP_SPATIAL_DITHER_DEPTH; \
+ type DCP_FRAME_RANDOM_ENABLE; \
+ type DCP_RGB_RANDOM_ENABLE; \
+ type DCP_HIGHPASS_RANDOM_ENABLE; \
+ type DENORM_MODE; \
+ type PIXEL_DEPTH; \
+ type PIXEL_EXPAN_MODE; \
+ type GAMUT_REMAP_C11; \
+ type GAMUT_REMAP_C12; \
+ type GAMUT_REMAP_C13; \
+ type GAMUT_REMAP_C14; \
+ type GAMUT_REMAP_C21; \
+ type GAMUT_REMAP_C22; \
+ type GAMUT_REMAP_C23; \
+ type GAMUT_REMAP_C24; \
+ type GAMUT_REMAP_C31; \
+ type GAMUT_REMAP_C32; \
+ type GAMUT_REMAP_C33; \
+ type GAMUT_REMAP_C34; \
+ type GRPH_GAMUT_REMAP_MODE; \
+ type OUTPUT_CSC_C11; \
+ type OUTPUT_CSC_C12; \
+ type OUTPUT_CSC_GRPH_MODE; \
+ type DCP_REGAMMA_MEM_PWR_DIS; \
+ type DCP_LUT_MEM_PWR_DIS; \
+ type REGAMMA_LUT_LIGHT_SLEEP_DIS; \
+ type DCP_LUT_LIGHT_SLEEP_DIS; \
+ type REGAMMA_CNTLA_EXP_REGION_START; \
+ type REGAMMA_CNTLA_EXP_REGION_START_SEGMENT; \
+ type REGAMMA_CNTLA_EXP_REGION_LINEAR_SLOPE; \
+ type REGAMMA_CNTLA_EXP_REGION_END; \
+ type REGAMMA_CNTLA_EXP_REGION_END_BASE; \
+ type REGAMMA_CNTLA_EXP_REGION_END_SLOPE; \
+ type REGAMMA_CNTLA_EXP_REGION0_LUT_OFFSET; \
+ type REGAMMA_CNTLA_EXP_REGION0_NUM_SEGMENTS; \
+ type REGAMMA_CNTLA_EXP_REGION1_LUT_OFFSET; \
+ type REGAMMA_CNTLA_EXP_REGION1_NUM_SEGMENTS; \
+ type DCP_REGAMMA_MEM_PWR_STATE; \
+ type REGAMMA_LUT_MEM_PWR_STATE; \
+ type REGAMMA_LUT_WRITE_EN_MASK; \
+ type GRPH_REGAMMA_MODE; \
+ type SCL_MODE; \
+ type SCL_BYPASS_MODE; \
+ type SCL_PSCL_EN; \
+ type SCL_H_NUM_OF_TAPS; \
+ type SCL_V_NUM_OF_TAPS; \
+ type SCL_BOUNDARY_MODE; \
+ type EXT_OVERSCAN_LEFT; \
+ type EXT_OVERSCAN_RIGHT; \
+ type EXT_OVERSCAN_TOP; \
+ type EXT_OVERSCAN_BOTTOM; \
+ type SCL_COEFF_MEM_PWR_DIS; \
+ type SCL_COEFF_MEM_PWR_STATE; \
+ type SCL_C_RAM_FILTER_TYPE; \
+ type SCL_C_RAM_PHASE; \
+ type SCL_C_RAM_TAP_PAIR_IDX; \
+ type SCL_C_RAM_EVEN_TAP_COEF_EN; \
+ type SCL_C_RAM_EVEN_TAP_COEF; \
+ type SCL_C_RAM_ODD_TAP_COEF_EN; \
+ type SCL_C_RAM_ODD_TAP_COEF; \
+ type VIEWPORT_X_START; \
+ type VIEWPORT_Y_START; \
+ type VIEWPORT_HEIGHT; \
+ type VIEWPORT_WIDTH; \
+ type SCL_H_SCALE_RATIO; \
+ type SCL_V_SCALE_RATIO; \
+ type SCL_H_INIT_INT; \
+ type SCL_H_INIT_FRAC; \
+ type SCL_V_INIT_INT; \
+ type SCL_V_INIT_FRAC; \
+ type LB_MEMORY_CONFIG; \
+ type LB_MEMORY_SIZE; \
+ type SCL_V_2TAP_HARDCODE_COEF_EN; \
+ type SCL_H_2TAP_HARDCODE_COEF_EN; \
+ type SCL_COEF_UPDATE_COMPLETE; \
+ type ALPHA_EN
+
+struct dce_transform_shift {
+ XFM_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_transform_mask {
+ XFM_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_transform_registers {
+ uint32_t LB_DATA_FORMAT;
+ uint32_t GAMUT_REMAP_CONTROL;
+ uint32_t GAMUT_REMAP_C11_C12;
+ uint32_t GAMUT_REMAP_C13_C14;
+ uint32_t GAMUT_REMAP_C21_C22;
+ uint32_t GAMUT_REMAP_C23_C24;
+ uint32_t GAMUT_REMAP_C31_C32;
+ uint32_t GAMUT_REMAP_C33_C34;
+ uint32_t OUTPUT_CSC_C11_C12;
+ uint32_t OUTPUT_CSC_C13_C14;
+ uint32_t OUTPUT_CSC_C21_C22;
+ uint32_t OUTPUT_CSC_C23_C24;
+ uint32_t OUTPUT_CSC_C31_C32;
+ uint32_t OUTPUT_CSC_C33_C34;
+ uint32_t OUTPUT_CSC_CONTROL;
+ uint32_t DCFE_MEM_LIGHT_SLEEP_CNTL;
+ uint32_t REGAMMA_CNTLA_START_CNTL;
+ uint32_t REGAMMA_CNTLA_SLOPE_CNTL;
+ uint32_t REGAMMA_CNTLA_END_CNTL1;
+ uint32_t REGAMMA_CNTLA_END_CNTL2;
+ uint32_t REGAMMA_CNTLA_REGION_0_1;
+ uint32_t REGAMMA_CNTLA_REGION_2_3;
+ uint32_t REGAMMA_CNTLA_REGION_4_5;
+ uint32_t REGAMMA_CNTLA_REGION_6_7;
+ uint32_t REGAMMA_CNTLA_REGION_8_9;
+ uint32_t REGAMMA_CNTLA_REGION_10_11;
+ uint32_t REGAMMA_CNTLA_REGION_12_13;
+ uint32_t REGAMMA_CNTLA_REGION_14_15;
+ uint32_t REGAMMA_LUT_WRITE_EN_MASK;
+ uint32_t REGAMMA_LUT_INDEX;
+ uint32_t REGAMMA_LUT_DATA;
+ uint32_t REGAMMA_CONTROL;
+ uint32_t DENORM_CONTROL;
+ uint32_t DCP_SPATIAL_DITHER_CNTL;
+ uint32_t OUT_ROUND_CONTROL;
+ uint32_t OUT_CLAMP_CONTROL_R_CR;
+ uint32_t OUT_CLAMP_CONTROL_G_Y;
+ uint32_t OUT_CLAMP_CONTROL_B_CB;
+ uint32_t SCL_MODE;
+ uint32_t SCL_TAP_CONTROL;
+ uint32_t SCL_CONTROL;
+ uint32_t SCL_BYPASS_CONTROL;
+ uint32_t EXT_OVERSCAN_LEFT_RIGHT;
+ uint32_t EXT_OVERSCAN_TOP_BOTTOM;
+ uint32_t SCL_VERT_FILTER_CONTROL;
+ uint32_t SCL_HORZ_FILTER_CONTROL;
+ uint32_t DCFE_MEM_PWR_CTRL;
+ uint32_t DCFE_MEM_PWR_STATUS;
+ uint32_t SCL_COEF_RAM_SELECT;
+ uint32_t SCL_COEF_RAM_TAP_DATA;
+ uint32_t VIEWPORT_START;
+ uint32_t VIEWPORT_SIZE;
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO;
+ uint32_t SCL_HORZ_FILTER_INIT;
+ uint32_t SCL_VERT_FILTER_INIT;
+ uint32_t SCL_AUTOMATIC_MODE_CONTROL;
+ uint32_t LB_MEMORY_CTRL;
+ uint32_t SCL_UPDATE;
+ uint32_t SCL_F_SHARP_CONTROL;
+};
+
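+/* Scale ratios and initial phase (integer and fractional parts) programmed
+ * into the SCL_*_FILTER_SCALE_RATIO and SCL_*_FILTER_INIT registers.
+ */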
+struct init_int_and_frac {
+ uint32_t integer;
+ uint32_t fraction;
+};
+
+struct scl_ratios_inits {
+ uint32_t h_int_scale_ratio;
+ uint32_t v_int_scale_ratio;
+ struct init_int_and_frac h_init;
+ struct init_int_and_frac v_init;
+};
+
+enum ram_filter_type {
+ FILTER_TYPE_RGB_Y_VERTICAL = 0, /* 0 - RGB/Y Vertical filter */
+ FILTER_TYPE_CBCR_VERTICAL = 1, /* 1 - CbCr Vertical filter */
+ FILTER_TYPE_RGB_Y_HORIZONTAL = 2, /* 2 - RGB/Y Horizontal filter */
+ FILTER_TYPE_CBCR_HORIZONTAL = 3, /* 3 - CbCr Horizontal filter */
+ FILTER_TYPE_ALPHA_VERTICAL = 4, /* 4 - Alpha Vertical filter. */
+ FILTER_TYPE_ALPHA_HORIZONTAL = 5, /* 5 - Alpha Horizontal filter. */
+};
+
+struct dce_transform {
+ struct transform base;
+ const struct dce_transform_registers *regs;
+ const struct dce_transform_shift *xfm_shift;
+ const struct dce_transform_mask *xfm_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool prescaler_on;
+};
+
+void dce_transform_construct(struct dce_transform *xfm_dce,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dce_transform_registers *regs,
+ const struct dce_transform_shift *xfm_shift,
+ const struct dce_transform_mask *xfm_mask);
+
+bool dce_transform_get_optimal_number_of_taps(
+ struct transform *xfm,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+void dce110_opp_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dce110_opp_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust);
+
+/* REGAMMA RELATED */
+void dce110_opp_power_on_regamma_lut(
+ struct transform *xfm,
+ bool power_on);
+
+void dce110_opp_program_regamma_pwl(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+void dce110_opp_set_regamma_mode(struct transform *xfm,
+ enum opp_regamma mode);
+
+#endif /* _DCE_DCE_TRANSFORM_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
new file mode 100644
index 000000000000..ea40870624b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
@@ -0,0 +1,23 @@
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of the HW CRTC block.
+
+DCE100 = dce100_resource.o dce100_hw_sequencer.o
+
+AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE100)
+
+
+###############################################################################
+# DCE 10x
+###############################################################################
+ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0
+TG_DCE100 = dce100_resource.o
+
+AMD_DAL_TG_DCE100 = $(addprefix \
+ $(AMDDALPATH)/dc/dce100/,$(TG_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100)
+endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
new file mode 100644
index 000000000000..e7a694835e3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+#include "dce100_hw_sequencer.h"
+#include "resource.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+
+/* include DCE10 register header files */
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+struct dce100_hw_seq_reg_offsets {
+ uint32_t blnd;
+ uint32_t crtc;
+};
+
+static const struct dce100_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+/***************************PIPE_CONTROL***********************************/
+
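+/* Translate the requested gating control into a VBIOS pipe-control action and
+ * execute it through the BIOS command table for the given controller.
+ */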
+static bool dce100_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)){
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because the BIOS sets it to 2
+ * by default when the command table is called
+ */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+}
+
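+/* Build a dm_pp_display_configuration for the new state and hand it to PPLib,
+ * but only when it differs from the previously applied configuration.
+ */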
+static void dce100_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->avail_mclk_switch_time_us =
+ dce110_get_min_vblank_time_us(context);
+ /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ / MEMORY_TYPE_MULTIPLIER;*/
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+ struct dm_pp_display_configuration)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+ dc->prev_display_config = *pp_display_cfg;
+}
+
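+/* Program the display clock with a 15% margin; decreases are applied only
+ * when the caller allows them.
+ */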
+void dce100_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce100_pplib_apply_display_requirements(dc, context);
+}
+
+
+/**************************************************************************/
+
+void dce100_hw_sequencer_construct(struct dc *dc)
+{
+ dce110_hw_sequencer_construct(dc);
+
+ dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
+ dc->hwss.set_bandwidth = dce100_set_bandwidth;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
new file mode 100644
index 000000000000..cb5384ef46c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE100_H__
+#define __DC_HWSS_DCE100_H__
+
+#include "core_types.h"
+
+struct dc;
+struct dc_state;
+
+void dce100_hw_sequencer_construct(struct dc *dc);
+
+void dce100_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
+#endif /* __DC_HWSS_DCE100_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
new file mode 100644
index 000000000000..90911258bdb3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -0,0 +1,933 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dce110/dce110_timing_generator.h"
+#include "irq/dce110/irq_service_dce110.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+
+#include "dce/dce_mem_input.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "dce100/dce100_hw_sequencer.h"
+
+#include "reg_helper.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+ #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+ #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+ #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+ #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
+ #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
+ #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
+ #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
+ #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
+ #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
+#endif
+
+static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE100_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE100(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE100_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST_DCE_BASE(id),\
+ .AFMT_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5),
+ stream_enc_regs(6)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_100_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_100(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
+};
+
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_100_110(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+
+
+#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 3
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x1918
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
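+/* Read the HDMI-disable, audio stream number and audio pin straps used during
+ * resource construction.
+ */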
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce100_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce110_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct stream_encoder *dce100_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id], &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE10_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE10_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE10_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce100_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce100_stream_encoder_create,
+ .create_hwseq = dce100_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE8_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE8_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE8_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+static struct mem_input *dce100_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 2;
+ return &dce_mi->base;
+}
+
+static void dce100_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce100_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ return &transform->base;
+}
+
+static struct input_pixel_processor *dce100_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 300000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dce100_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+struct output_pixel_processor *dce100_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct clock_source *dce100_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce100_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
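+/* Free every block owned by the resource pool: per-pipe OPP/XFM/IPP/MI/TG,
+ * stream encoders, clock sources, audios, display clock and IRQ service.
+ */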
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce100_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL)
+ dce100_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce100_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL)
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL)
+ dal_irq_service_destroy(&pool->base.irqs);
+}
+
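+/* Derive HW-specific parameters and info frames for the head pipe of the
+ * stream once pool and clock resources have been mapped.
+ */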
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+bool dce100_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ /* TODO: implement when needed; for now, hardcode the max values */
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+
+ return true;
+}
+
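+/* DCE10 has no underlay pipe, so only a single, non-video plane per stream is
+ * accepted.
+ */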
+static bool dce100_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce100_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce100_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+enum dc_status dce100_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+enum dc_status dce100_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ if (!dce100_validate_bandwidth(dc, context))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static void dce100_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
+{
+
+ if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return DC_OK;
+
+ return DC_FAIL_SURFACE_VALIDATE;
+}
+
+static const struct resource_funcs dce100_res_pool_funcs = {
+ .destroy = dce100_destroy_resource_pool,
+ .link_enc_create = dce100_link_encoder_create,
+ .validate_guaranteed = dce100_validate_guaranteed,
+ .validate_bandwidth = dce100_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce100_add_stream_to_ctx,
+ .validate_global = dce100_validate_global
+};
+
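+/* Build the DCE10 resource pool: clock sources (PLLs or external DP clock),
+ * display clock, IRQ service and one TG/MI/IPP/XFM/OPP set per pipe.
+ */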
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce100_res_pool_funcs;
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce100_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ /* get static clock information for PPLIB or firmware, save
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ /*************************************************
+ * Resource + ASIC cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce100_timing_generator_create(
+ ctx,
+ i,
+ &dce100_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce100_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce100_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce100_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce100_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce100_hw_sequencer_construct(dc);
+ return true;
+
+res_create_fail:
+ destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dce100_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
new file mode 100644
index 000000000000..de8fdf438f9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
@@ -0,0 +1,26 @@
+/*
+ * dce100_resource.h
+ *
+ * Created on: 2016-01-20
+ * Author: qyang
+ */
+
+#ifndef DCE100_RESOURCE_H_
+#define DCE100_RESOURCE_H_
+
+struct dc;
+struct resource_pool;
+struct dc_validation_set;
+
+struct resource_pool *dce100_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+
+enum dc_status dce100_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+
+#endif /* DCE100_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
new file mode 100644
index 000000000000..98d956e2f218
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of the HW CRTC block.
+
+DCE110 = dce110_timing_generator.o \
+dce110_compressor.o dce110_hw_sequencer.o dce110_resource.o \
+dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
+dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o
+
+AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE110)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
new file mode 100644
index 000000000000..6923662413cd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#include "gmc/gmc_8_2_d.h"
+
+#include "include/logger_interface.h"
+
+#include "dce110_compressor.h"
+
+#define DCP_REG(reg)\
+ (reg + cp110->offsets.dcp_offset)
+#define DMIF_REG(reg)\
+ (reg + cp110->offsets.dmif_offset)
+
+static const struct dce110_compressor_reg_offsets reg_offsets[] = {
+{
+ .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+}
+};
+
+static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
+
+enum fbc_idle_force {
+ /* Bit 0 - Display registers updated */
+ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
+
+ /* Bit 2 - FBC_GRPH_COMP_EN register updated */
+ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
+ /* Bit 3 - FBC_SRC_SEL register updated */
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
+ /* Bit 4 - FBC_MIN_COMPRESSION register updated */
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
+ /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
+ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
+ /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
+ /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
+
+ /* Bit 24 - Memory write to region 0 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
+ /* Bit 25 - Memory write to region 1 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
+ /* Bit 26 - Memory write to region 2 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
+ /* Bit 27 - Memory write to region 3 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
+
+ /* Bit 28 - Memory write from any client other than MCIF */
+ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
+ /* Bit 29 - CG statics screen signal is inactive */
+ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
+};
+
+
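+/* Round a line width up to a whole number of 256-pixel FBC chunks. */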
+static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
+{
+ return 256 * ((pixels + 255) / 256);
+}
+
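+/* Poll FBC_STATUS (up to 10 times, 10 ms apart) until the enable status
+ * matches the requested state, warning if it never does.
+ */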
+static void wait_for_fbc_state_changed(
+ struct dce110_compressor *cp110,
+ bool enabled)
+{
+ uint8_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+ while (counter < 10) {
+ value = dm_read_reg(cp110->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+ msleep(10);
+ counter++;
+ }
+
+ if (counter == 10) {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ } else {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_SYNC,
+ "FBC status changed to %d", enabled);
+ }
+
+
+}
+
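+/* Power up the FBC engine: enable the block, select RLE/DPCM4/indexed
+ * compression and program a 1:1 minimum compression ratio.
+ */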
+void dce110_compressor_power_up_fbc(struct compressor *compressor)
+{
+ uint32_t value;
+ uint32_t addr;
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
+ set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
+ if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
+ /* HW needs to do power measurement comparison. */
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CNTL,
+ FBC_COMP_CLK_GATE_EN);
+ }
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_MODE;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ /* FBC_MIN_COMPRESSION: 0 ==> 2:1, 1 ==> 4:1, 2 ==> 8:1, 0xF ==> 1:1 */
+ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
+ dm_write_reg(compressor->ctx, addr, value);
+ compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
+
+ value = 0;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
+
+ value = 0xFFFFFF;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
+}
+
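+/* Attach FBC to the pipe selected in params and enable graphics compression;
+ * FBC_GRPH_COMP_EN is toggled to work around a HW bug.
+ */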
+void dce110_compressor_enable_fbc(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ (!dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL))) {
+
+ uint32_t addr;
+ uint32_t value, misc_value;
+
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(
+ value,
+ params->inst,
+ FBC_CNTL, FBC_SRC_SEL);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Keep track of enum controller_id FBC is attached to */
+ compressor->is_enabled = true;
+ compressor->attached_inst = params->inst;
+ cp110->offsets = reg_offsets[params->inst];
+
+ /* Toggle it, as there is a bug in HW */
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* FBC usage with scatter & gather for dce110 */
+ misc_value = dm_read_reg(compressor->ctx, mmFBC_MISC);
+
+ set_reg_field_value(misc_value, 1,
+ FBC_MISC, FBC_INVALIDATE_ON_ERROR);
+ set_reg_field_value(misc_value, 1,
+ FBC_MISC, FBC_DECOMPRESS_ERROR_CLEAR);
+ set_reg_field_value(misc_value, 0x14,
+ FBC_MISC, FBC_SLOW_REQ_INTERVAL);
+
+ dm_write_reg(compressor->ctx, mmFBC_MISC, misc_value);
+
+ /* Enable FBC */
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ wait_for_fbc_state_changed(cp110, true);
+ }
+}
+
+void dce110_compressor_disable_fbc(struct compressor *compressor)
+{
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ wait_for_fbc_state_changed(cp110, false);
+ }
+}
+
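+/* Report whether FBC is active in HW, either via FBC_ENABLE_STATUS or, when
+ * stop-on-hflip is set, via FBC_GRPH_COMP_EN; optionally returns the attached
+ * instance.
+ */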
+bool dce110_compressor_is_fbc_enabled_in_hw(
+ struct compressor *compressor,
+ uint32_t *inst)
+{
+ /* Check the hardware register */
+ uint32_t value;
+
+ value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
+ if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ value = dm_read_reg(compressor->ctx, mmFBC_MISC);
+ if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
+ value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+
+ if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
+ if (inst != NULL)
+ *inst =
+ compressor->attached_inst;
+ return true;
+ }
+ }
+ return false;
+}
+
+
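+/* Program the compressed surface address (high part first) and the FBC pitch
+ * derived from the source view width.
+ */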
+void dce110_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+ uint32_t value = 0;
+ uint32_t fbc_pitch = 0;
+ uint32_t compressed_surf_address_low_part =
+ compressor->compr_surface_address.addr.low_part;
+
+ /* Clear content first. */
+ dm_write_reg(
+ compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ 0);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
+
+ /* Write address, HIGH has to be first. */
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ compressor->compr_surface_address.addr.high_part);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
+ compressed_surf_address_low_part);
+
+ fbc_pitch = align_to_chunks_number_per_line(params->source_view_width);
+
+ if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
+ fbc_pitch = fbc_pitch / 8;
+ else
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Unexpected DCE11 compression ratio",
+ __func__);
+
+ /* Clear content first. */
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
+
+ /* Write FBC Pitch. */
+ set_reg_field_value(
+ value,
+ fbc_pitch,
+ GRPH_COMPRESS_PITCH,
+ GRPH_COMPRESS_PITCH);
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
+
+}
+
+void dce110_compressor_set_fbc_invalidation_triggers(
+ struct compressor *compressor,
+ uint32_t fbc_trigger)
+{
+ /* Disable the region hit event (FBC_MEMORY_REGION_MASK = 0, bits 16-19);
+ * for DCE 11, memory regions cannot be used - they do not work with S/G
+ */
+ uint32_t addr = mmFBC_CLIENT_REGION_MASK;
+ uint32_t value = dm_read_reg(compressor->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CLIENT_REGION_MASK,
+ FBC_MEMORY_REGION_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Set up the events on which to clear all CSM entries (effectively marking
+ * the current compressed data invalid).
+ * For DCE 11 CSM metadata 11111 means - "Not Compressed"
+ * Used as the initial value of the metadata sent to the compressor
+ * after invalidation, to indicate that the compressor should attempt
+ * to compress all chunks on the current pass. Also used when the chunk
+ * is not successfully written to memory.
+ * When this CSM value is detected, FBC reads from the uncompressed
+ * buffer. Set events according to passed in value, these events are
+ * valid for DCE11:
+ * - bit 0 - display register updated
+ * - bit 28 - memory write from any client except from MCIF
+ * - bit 29 - CG static screen signal is inactive
+ * In addition, DCE11.1 also needs to set new DCE11.1 specific events
+ * that are used to trigger invalidation on certain register changes,
+ * for example enabling of Alpha Compression may trigger invalidation of
+ * FBC once bit is set. These events are as follows:
+ * - Bit 2 - FBC_GRPH_COMP_EN register updated
+ * - Bit 3 - FBC_SRC_SEL register updated
+ * - Bit 4 - FBC_MIN_COMPRESSION register updated
+ * - Bit 5 - FBC_ALPHA_COMP_EN register updated
+ * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
+ * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
+ */
+ addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ fbc_trigger |
+ FBC_IDLE_FORCE_GRPH_COMP_EN |
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE |
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
+ FBC_IDLE_FORCE_ALPHA_COMP_EN |
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ FBC_IDLE_FORCE_CLEAR_MASK,
+ FBC_IDLE_FORCE_CLEAR_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+struct compressor *dce110_compressor_create(struct dc_context *ctx)
+{
+ struct dce110_compressor *cp110 =
+ kzalloc(sizeof(struct dce110_compressor), GFP_KERNEL);
+
+ if (!cp110)
+ return NULL;
+
+ dce110_compressor_construct(cp110, ctx);
+ return &cp110->base;
+}
+
+void dce110_compressor_destroy(struct compressor **compressor)
+{
+ kfree(TO_DCE110_COMPRESSOR(*compressor));
+ *compressor = NULL;
+}
+
+bool dce110_get_required_compressed_surfacesize(struct fbc_input_info fbc_input_info,
+ struct fbc_requested_compressed_size size)
+{
+ bool result = false;
+
+ unsigned int max_x = FBC_MAX_X, max_y = FBC_MAX_Y;
+
+ get_max_support_fbc_buffersize(&max_x, &max_y);
+
+ if (fbc_input_info.dynamic_fbc_buffer_alloc == 0) {
+ /*
+ * For DCE11, use the maximum HW-supported size: the HW supports up to a
+ * 3840x2400 resolution, or 18000 chunks.
+ */
+ size.preferred_size = size.min_size = align_to_chunks_number_per_line(max_x) * max_y * 4; /* (For FBC when LPT not supported). */
+ size.preferred_size_alignment = size.min_size_alignment = 0x100; /* For FBC when LPT not supported */
+ size.bits.preferred_must_be_framebuffer_pool = 1;
+ size.bits.min_must_be_framebuffer_pool = 1;
+
+ result = true;
+ }
+ /*
+ * A registry key with an optional size could be added here to override the
+ * values above for debugging purposes.
+ */
+
+ return result;
+}
+
+
+void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y)
+{
+ *max_x = FBC_MAX_X;
+ *max_y = FBC_MAX_Y;
+
+ /* if (m_smallLocalFrameBufferMemory == 1)
+ * {
+ * *max_x = FBC_MAX_X_SG;
+ * *max_y = FBC_MAX_Y_SG;
+ * }
+ */
+}
+
+
+unsigned int controller_id_to_index(enum controller_id controller_id)
+{
+ unsigned int index = 0;
+
+ switch (controller_id) {
+ case CONTROLLER_ID_D0:
+ index = 0;
+ break;
+ case CONTROLLER_ID_D1:
+ index = 1;
+ break;
+ case CONTROLLER_ID_D2:
+ index = 2;
+ break;
+ case CONTROLLER_ID_D3:
+ index = 3;
+ break;
+ default:
+ break;
+ }
+ return index;
+}
+
+
+static const struct compressor_funcs dce110_compressor_funcs = {
+ .power_up_fbc = dce110_compressor_power_up_fbc,
+ .enable_fbc = dce110_compressor_enable_fbc,
+ .disable_fbc = dce110_compressor_disable_fbc,
+ .set_fbc_invalidation_triggers = dce110_compressor_set_fbc_invalidation_triggers,
+ .surface_address_and_pitch = dce110_compressor_program_compressed_surface_address_and_pitch,
+ .is_fbc_enabled_in_hw = dce110_compressor_is_fbc_enabled_in_hw
+};
+
+
+void dce110_compressor_construct(struct dce110_compressor *compressor,
+ struct dc_context *ctx)
+{
+
+ compressor->base.options.raw = 0;
+ compressor->base.options.bits.FBC_SUPPORT = true;
+
+ /* For DCE 11, always use one DRAM channel for LPT */
+ compressor->base.lpt_channels_num = 1;
+ compressor->base.options.bits.DUMMY_BACKEND = false;
+
+ /*
+ * Check if this system has more than 1 DRAM channel; if it has only 1, LPT
+ * should not be supported.
+ */
+
+
+ compressor->base.options.bits.CLK_GATING_DISABLED = false;
+
+ compressor->base.ctx = ctx;
+ compressor->base.embedded_panel_h_size = 0;
+ compressor->base.embedded_panel_v_size = 0;
+ compressor->base.memory_bus_width = ctx->asic_id.vram_width;
+ compressor->base.allocated_size = 0;
+ compressor->base.preferred_requested_size = 0;
+ compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
+ compressor->base.banks_num = 0;
+ compressor->base.raw_size = 0;
+ compressor->base.channel_interleave_size = 0;
+ compressor->base.dram_channels_num = 0;
+ compressor->base.lpt_channels_num = 0;
+ compressor->base.attached_inst = 0;
+ compressor->base.is_enabled = false;
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ compressor->base.funcs = &dce110_compressor_funcs;
+
+#endif
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
new file mode 100644
index 000000000000..26c7335a1cbf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h
@@ -0,0 +1,81 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMPRESSOR_DCE110_H__
+#define __DC_COMPRESSOR_DCE110_H__
+
+#include "../inc/compressor.h"
+
+#define TO_DCE110_COMPRESSOR(compressor)\
+ container_of(compressor, struct dce110_compressor, base)
+
+struct dce110_compressor_reg_offsets {
+ uint32_t dcp_offset;
+ uint32_t dmif_offset;
+};
+
+struct dce110_compressor {
+ struct compressor base;
+ struct dce110_compressor_reg_offsets offsets;
+};
+
+struct compressor *dce110_compressor_create(struct dc_context *ctx);
+
+void dce110_compressor_construct(struct dce110_compressor *cp110,
+ struct dc_context *ctx);
+
+void dce110_compressor_destroy(struct compressor **cp);
+
+/* FBC RELATED */
+void dce110_compressor_power_up_fbc(struct compressor *cp);
+
+void dce110_compressor_enable_fbc(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+void dce110_compressor_disable_fbc(struct compressor *cp);
+
+void dce110_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
+ uint32_t fbc_trigger);
+
+void dce110_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce110_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+
+/* LPT RELATED */
+void dce110_compressor_enable_lpt(struct compressor *cp);
+
+void dce110_compressor_disable_lpt(struct compressor *cp);
+
+void dce110_compressor_program_lpt_control(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce110_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
+
+void get_max_support_fbc_buffersize(unsigned int *max_x, unsigned int *max_y);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
new file mode 100644
index 000000000000..07ff8d2faf3f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -0,0 +1,2991 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dc.h"
+#include "dc_bios_types.h"
+#include "core_types.h"
+#include "core_status.h"
+#include "resource.h"
+#include "dm_helpers.h"
+#include "dce110_hw_sequencer.h"
+#include "dce110_timing_generator.h"
+#include "dce/dce_hwseq.h"
+#include "gpio_service_interface.h"
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dce110_compressor.h"
+#endif
+
+#include "bios/bios_parser_helper.h"
+#include "timing_generator.h"
+#include "mem_input.h"
+#include "opp.h"
+#include "ipp.h"
+#include "transform.h"
+#include "stream_encoder.h"
+#include "link_encoder.h"
+#include "link_hwss.h"
+#include "clock_source.h"
+#include "abm.h"
+#include "audio.h"
+#include "reg_helper.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "custom_float.h"
+
+/*
+ * All values are in milliseconds.
+ * For eDP, after power-up/power-down there is a
+ * 300/500 msec max. delay from LCDVCC to black video generation.
+ */
+#define PANEL_POWER_UP_TIMEOUT 300
+#define PANEL_POWER_DOWN_TIMEOUT 500
+#define HPD_CHECK_INTERVAL 10
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+struct dce110_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce110_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTCV_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_BLND(reg, id)\
+ (reg + reg_offsets[id].blnd)
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+#define MAX_WATERMARK 0xFFFF
+#define SAFE_NBP_MARK 0x7FFF
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+/***************************PIPE_CONTROL***********************************/
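+/* Program DVMM PTE control and the per-chunk PTE request parameters (HFLIP_PTEREQ_*). */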
+static void dce110_init_pte(struct dc_context *ctx)
+{
+ uint32_t addr;
+ uint32_t value = 0;
+ uint32_t chunk_int = 0;
+ uint32_t chunk_mul = 0;
+
+ addr = mmUNP_DVMM_PTE_CONTROL;
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DVMM_PTE_CONTROL,
+ DVMM_USE_SINGLE_PTE);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE0);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE1);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmDVMM_PTE_REQ;
+ value = dm_read_reg(ctx, addr);
+
+ chunk_int = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ chunk_mul = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ if (chunk_int != 0x4 || chunk_mul != 0x4) {
+
+ set_reg_field_value(
+ value,
+ 255,
+ DVMM_PTE_REQ,
+ MAX_PTEREQ_TO_ISSUE);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ dm_write_reg(ctx, addr, value);
+ }
+}
+/**************************************************************************/
+
+static void enable_display_pipe_clock_gating(
+ struct dc_context *ctx,
+ bool clock_gating)
+{
+ /*TODO*/
+}
+
+static bool dce110_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ return true;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (controller_id == underlay_idx)
+ controller_id = CONTROLLER_ID_UNDERLAY0 - 1;
+
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0){
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because the BIOS sets it to 2
+ * by default when the command table is called.
+ *
+ * The BIOS parser accepts controller_id = 6 as indicating the
+ * underlay pipe in dce110, but we do not support more
+ * than 3.
+ */
+ if (controller_id < CONTROLLER_ID_MAX - 1)
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (power_gating != PIPE_GATING_CONTROL_ENABLE)
+ dce110_init_pte(ctx);
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+}
+
+static void build_prescale_params(struct ipp_prescale_params *prescale_params,
+ const struct dc_plane_state *plane_state)
+{
+ prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
+
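+ /* The prescale factor depends on the surface bit depth: 8-bit, 10-bit and 16-bit formats each use a different fixed-point multiplier. */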
+ switch (plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ prescale_params->scale = 0x2020;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ prescale_params->scale = 0x2008;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ prescale_params->scale = 0x2000;
+ break;
+ default:
+ ASSERT(false);
+ break;
+ }
+}
+
+static bool dce110_set_input_transfer_func(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
+{
+ struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
+ const struct dc_transfer_func *tf = NULL;
+ struct ipp_prescale_params prescale_params = { 0 };
+ bool result = true;
+
+ if (ipp == NULL)
+ return false;
+
+ if (plane_state->in_transfer_func)
+ tf = plane_state->in_transfer_func;
+
+ build_prescale_params(&prescale_params, plane_state);
+ ipp->funcs->ipp_program_prescale(ipp, &prescale_params);
+
+ if (plane_state->gamma_correction && dce_use_lut(plane_state))
+ ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction);
+
+ if (tf == NULL) {
+ /* Default case if no input transfer function specified */
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ } else if (tf->type == TF_TYPE_PREDEFINED) {
+ switch (tf->tf) {
+ case TRANSFER_FUNCTION_SRGB:
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ break;
+ case TRANSFER_FUNCTION_BT709:
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_HW_xvYCC);
+ break;
+ case TRANSFER_FUNCTION_LINEAR:
+ ipp->funcs->ipp_set_degamma(ipp,
+ IPP_DEGAMMA_MODE_BYPASS);
+ break;
+ case TRANSFER_FUNCTION_PQ:
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+ } else if (tf->type == TF_TYPE_BYPASS) {
+ ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_BYPASS);
+ } else {
+ /*TF_TYPE_DISTRIBUTED_POINTS - Not supported in DCE 11*/
+ result = false;
+ }
+
+ return result;
+}
+
+static bool convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
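+ /* The region start point uses a signed custom float with 6 exponent and 12 mantissa bits. */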
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].x,
+ &fmt,
+ &arr_points[0].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].offset,
+ &fmt,
+ &arr_points[0].custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].slope,
+ &fmt,
+ &arr_points[0].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].x,
+ &fmt,
+ &arr_points[1].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].y,
+ &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[2].slope,
+ &fmt,
+ &arr_points[2].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(
+ rgb->red,
+ &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->green,
+ &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->blue,
+ &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_red,
+ &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_green,
+ &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_blue,
+ &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+
+static bool dce110_translate_regamma_to_hw_format(const struct dc_transfer_func
+ *output_tf, struct pwl_params *regamma_params)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t segment_start, segment_end;
+ uint32_t i, j, k, seg_distr[16], increment, start_index, hw_points;
+
+ if (output_tf == NULL || regamma_params == NULL ||
+ output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ arr_points = regamma_params->arr_points;
+ rgb_resulted = regamma_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(regamma_params, 0, sizeof(struct pwl_params));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* 16 segments
+ * segments are from 2^-11 to 2^5
+ */
+ segment_start = -11;
+ segment_end = 5;
+
+ seg_distr[0] = 2;
+ seg_distr[1] = 2;
+ seg_distr[2] = 2;
+ seg_distr[3] = 2;
+ seg_distr[4] = 2;
+ seg_distr[5] = 2;
+ seg_distr[6] = 3;
+ seg_distr[7] = 4;
+ seg_distr[8] = 4;
+ seg_distr[9] = 4;
+ seg_distr[10] = 4;
+ seg_distr[11] = 5;
+ seg_distr[12] = 5;
+ seg_distr[13] = 5;
+ seg_distr[14] = 5;
+ seg_distr[15] = 5;
+
+ } else {
+ /* 10 segments
+ * segment is from 2^-10 to 2^0
+ */
+ segment_start = -10;
+ segment_end = 0;
+
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 5;
+ seg_distr[9] = 5;
+ seg_distr[10] = -1;
+ seg_distr[11] = -1;
+ seg_distr[12] = -1;
+ seg_distr[13] = -1;
+ seg_distr[14] = -1;
+ seg_distr[15] = -1;
+ }
+
+ for (k = 0; k < 16; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
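+ /* Sample the transfer function curve segment by segment; each segment contributes 2^seg_distr[k] hardware points. */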
+ j = 0;
+ for (k = 0; k < (segment_end - segment_start); k++) {
+ increment = 32 / (1 << seg_distr[k]);
+ start_index = (segment_start + k + 25) * 32;
+ for (i = start_index; i < start_index + 32; i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (segment_end + 25) * 32;
+ rgb_resulted[hw_points - 1].red =
+ output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green =
+ output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue =
+ output_tf->tf_pts.blue[start_index];
+
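+ /* arr_points[0..2] bound the programmable region, from x = 2^segment_start to x = 2^segment_end. */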
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+ arr_points[2].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(
+ arr_points[0].y,
+ arr_points[0].x);
+
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* arr_points[1].y should be the Y value for the region end
+ * (hw_points), not the last HW point (hw_points - 1).
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+ arr_points[2].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+ arr_points[2].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ arr_points[2].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ regamma_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < 16 && i < 16; k++) {
+ if (seg_distr[k] != -1) {
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ regamma_params->arr_curve_points[i].offset =
+ regamma_params->arr_curve_points[k].
+ offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(
+ rgb_plus_1->red,
+ rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(
+ rgb_plus_1->green,
+ rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(
+ rgb_plus_1->blue,
+ rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+
+ convert_to_custom_float(rgb_resulted, arr_points, hw_points);
+
+ return true;
+}
+
+static bool dce110_set_output_transfer_func(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+{
+ struct transform *xfm = pipe_ctx->plane_res.xfm;
+
+ xfm->funcs->opp_power_on_regamma_lut(xfm, true);
+ xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
+
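+ /* Prefer the fixed HW sRGB regamma; otherwise translate the transfer function into a PWL, falling back to bypass. */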
+ if (stream->out_transfer_func &&
+ stream->out_transfer_func->type ==
+ TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func->tf ==
+ TRANSFER_FUNCTION_SRGB) {
+ xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB);
+ } else if (dce110_translate_regamma_to_hw_format(
+ stream->out_transfer_func, &xfm->regamma_params)) {
+ xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params);
+ xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER);
+ } else {
+ xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_BYPASS);
+ }
+
+ xfm->funcs->opp_power_on_regamma_lut(xfm, false);
+
+ return true;
+}
+
+static enum dc_status bios_parser_crtc_source_select(
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_bios *dcb;
+ /* call VBIOS table to set CRTC source for the HW
+ * encoder block
+ * note: the video BIOS clears all FMT settings here. */
+ struct bp_crtc_source_select crtc_source_select = {0};
+ const struct dc_sink *sink = pipe_ctx->stream->sink;
+
+ crtc_source_select.engine_id = pipe_ctx->stream_res.stream_enc->id;
+ crtc_source_select.controller_id = pipe_ctx->pipe_idx + 1;
+ /* TODO: un-hardcode color depth and dp_audio, and account for
+ * the case where the signal and sink signal differ (translator
+ * encoder). */
+ crtc_source_select.signal = pipe_ctx->stream->signal;
+ crtc_source_select.enable_dp_audio = false;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+
+ switch (pipe_ctx->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ crtc_source_select.display_output_bit_depth = PANEL_6BIT_COLOR;
+ break;
+ case COLOR_DEPTH_888:
+ crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR;
+ break;
+ case COLOR_DEPTH_101010:
+ crtc_source_select.display_output_bit_depth = PANEL_10BIT_COLOR;
+ break;
+ case COLOR_DEPTH_121212:
+ crtc_source_select.display_output_bit_depth = PANEL_12BIT_COLOR;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ crtc_source_select.display_output_bit_depth = PANEL_8BIT_COLOR;
+ break;
+ }
+
+ dcb = sink->ctx->dc_bios;
+
+ if (BP_RESULT_OK != dcb->funcs->crtc_source_select(
+ dcb,
+ &crtc_source_select)) {
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ return DC_OK;
+}
+
+void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ ASSERT(pipe_ctx->stream);
+
+ if (pipe_ctx->stream_res.stream_enc == NULL)
+ return; /* this is not root pipe */
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+}
+
+void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
+{
+ enum dc_lane_count lane_count =
+ pipe_ctx->stream->sink->link->cur_link_settings.lane_count;
+
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link *link = pipe_ctx->stream->sink->link;
+
+ /* 1. update AVI info frame (HDMI, DP)
+ * we always need to update info frame
+ */
+ uint32_t active_total_with_borders;
+ uint32_t early_control = 0;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ /* TODOFPGA may change to hwss.update_info_frame */
+ dce110_update_info_frame(pipe_ctx);
+ /* enable early control to avoid corruption on DP monitor*/
+ active_total_with_borders =
+ timing->h_addressable
+ + timing->h_border_left
+ + timing->h_border_right;
+
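+ /* e.g. 1920 addressable pixels, no borders, on a 4-lane link: 1920 % 4 == 0, so early_control becomes the lane count (4) */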
+ if (lane_count != 0)
+ early_control = active_total_with_borders % lane_count;
+
+ if (early_control == 0)
+ early_control = lane_count;
+
+ tg->funcs->set_early_control(tg, early_control);
+
+ /* enable audio only within mode set */
+ if (pipe_ctx->stream_res.audio != NULL) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
+ }
+
+ /* For MST, multiple streams can share a single link.
+ * Connect the DIG back end to the front end in enable_stream and
+ * disconnect them in disable_stream; this keeps the separation
+ * between stream and link logically clean. */
+ link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
+
+}
+
+/*todo: cloned in stream enc, fix*/
+static bool is_panel_backlight_on(struct dce_hwseq *hws)
+{
+ uint32_t value;
+
+ REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
+
+ return value;
+}
+
+static bool is_panel_powered_on(struct dce_hwseq *hws)
+{
+ uint32_t value;
+
+ REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &value);
+ return value == 1;
+}
+
+static enum bp_result link_transmitter_control(
+ struct dc_bios *bios,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result;
+
+ result = bios->funcs->transmitter_control(bios, cntl);
+
+ return result;
+}
+
+/*
+ * @brief
+ * eDP only.
+ */
+void hwss_edp_wait_for_hpd_ready(
+ struct link_encoder *enc,
+ bool power_up)
+{
+ struct dc_context *ctx = enc->ctx;
+ struct graphics_object_id connector = enc->connector;
+ struct gpio *hpd;
+ bool edp_hpd_high = false;
+ uint32_t time_elapsed = 0;
+ uint32_t timeout = power_up ?
+ PANEL_POWER_UP_TIMEOUT : PANEL_POWER_DOWN_TIMEOUT;
+
+ if (dal_graphics_object_id_get_connector_id(connector)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (!power_up)
+ /*
+ * Starting with KV, we do not wait for HPD to go low after turning
+ * off VCC; instead, we check the SW timer in power_up().
+ */
+ return;
+
+ /*
+ * When we power on/off the eDP panel,
+ * we need to wait until SENSE bit is high/low.
+ */
+
+ /* obtain HPD */
+ /* TODO what to do with this? */
+ hpd = get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
+
+ if (!hpd) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ dal_gpio_open(hpd, GPIO_MODE_INTERRUPT);
+
+ /* wait until timeout or panel detected */
+
+ do {
+ uint32_t detected = 0;
+
+ dal_gpio_get_value(hpd, &detected);
+
+ if (!(detected ^ power_up)) {
+ edp_hpd_high = true;
+ break;
+ }
+
+ msleep(HPD_CHECK_INTERVAL);
+
+ time_elapsed += HPD_CHECK_INTERVAL;
+ } while (time_elapsed < timeout);
+
+ dal_gpio_close(hpd);
+
+ dal_gpio_destroy_irq(&hpd);
+
+ if (false == edp_hpd_high) {
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: wait timed out!\n", __func__);
+ }
+}
+
+void hwss_edp_power_control(
+ struct link_encoder *enc,
+ bool power_up)
+{
+ struct dc_context *ctx = enc->ctx;
+ struct dce_hwseq *hwseq = ctx->dc->hwseq;
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result bp_result;
+
+
+ if (dal_graphics_object_id_get_connector_id(enc->connector)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (power_up != is_panel_powered_on(hwseq)) {
+ /* Send VBIOS command to control eDP panel power */
+
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: Panel Power action: %s\n",
+ __func__, (power_up ? "On":"Off"));
+
+ cntl.action = power_up ?
+ TRANSMITTER_CONTROL_POWER_ON :
+ TRANSMITTER_CONTROL_POWER_OFF;
+ cntl.transmitter = enc->transmitter;
+ cntl.connector_obj_id = enc->connector;
+ cntl.coherent = false;
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.hpd_sel = enc->hpd_source;
+
+ bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
+
+ if (bp_result != BP_RESULT_OK)
+ dm_logger_write(ctx->logger, LOG_ERROR,
+ "%s: Panel Power bp_result: %d\n",
+ __func__, bp_result);
+ } else {
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: Skipping Panel Power action: %s\n",
+ __func__, (power_up ? "On":"Off"));
+ }
+
+ hwss_edp_wait_for_hpd_ready(enc, true);
+}
+
+/*todo: cloned in stream enc, fix*/
+/*
+ * @brief
+ * eDP only. Control the backlight of the eDP panel
+ */
+void hwss_edp_backlight_control(
+ struct dc_link *link,
+ bool enable)
+{
+ struct dce_hwseq *hws = link->dc->hwseq;
+ struct dc_context *ctx = link->dc->ctx;
+ struct bp_transmitter_control cntl = { 0 };
+
+ if (dal_graphics_object_id_get_connector_id(link->link_id)
+ != CONNECTOR_ID_EDP) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (enable && is_panel_backlight_on(hws)) {
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: panel already powered up. Do nothing.\n",
+ __func__);
+ return;
+ }
+
+ /* Send VBIOS command to control eDP panel backlight */
+
+ dm_logger_write(ctx->logger, LOG_HW_RESUME_S3,
+ "%s: backlight action: %s\n",
+ __func__, (enable ? "On":"Off"));
+
+ cntl.action = enable ?
+ TRANSMITTER_CONTROL_BACKLIGHT_ON :
+ TRANSMITTER_CONTROL_BACKLIGHT_OFF;
+
+ /*cntl.engine_id = ctx->engine;*/
+ cntl.transmitter = link->link_enc->transmitter;
+ cntl.connector_obj_id = link->link_enc->connector;
+ /*todo: unhardcode*/
+ cntl.lanes_number = LANE_COUNT_FOUR;
+ cntl.hpd_sel = link->link_enc->hpd_source;
+
+ /* For eDP, the following delays might need to be considered
+ * after link training is completed:
+ * idle period - min. accounts for the required BS-Idle pattern,
+ * max. allows for source frame synchronization;
+ * 50 msec max. delay from valid video data from the source
+ * to video on the display or backlight enable.
+ *
+ * Disable the delay for now.
+ * Enable it in the future if necessary.
+ */
+ /* dc_service_sleep_in_milliseconds(50); */
+ link_transmitter_control(link->dc->ctx->dc_bios, &cntl);
+}
+
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, true);
+ if (pipe_ctx->stream_res.audio) {
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
+ pipe_ctx->stream_res.stream_enc);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
+ pipe_ctx->stream_res.stream_enc);
+ /*don't free audio if it is from retrain or internal disable stream*/
+ if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) {
+ /*we have to dynamic arbitrate the audio endpoints*/
+ pipe_ctx->stream_res.audio = NULL;
+ /*we free the resource, need reset is_audio_acquired*/
+ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
+ }
+
+ /* TODO: notify the audio driver if the audio mode list changed;
+ * add an audio mode list change flag. */
+ /* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
+ * stream->stream_engine_id);
+ */
+ }
+
+ /* blank at encoder level */
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
+ hwss_edp_backlight_control(link, false);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+ }
+ link->link_enc->funcs->connect_dig_be_to_fe(
+ link->link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+
+}
+
+void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = { { 0 } };
+ struct dc_link *link = pipe_ctx->stream->sink->link;
+
+ /* only 3 items below are used by unblank */
+ params.pixel_clk_khz =
+ pipe_ctx->stream->timing.pix_clk_khz;
+ params.link_settings.link_rate = link_settings->link_rate;
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ if (link->connector_signal == SIGNAL_TYPE_EDP)
+ hwss_edp_backlight_control(link, true);
+}
+
+
+void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ if (pipe_ctx != NULL && pipe_ctx->stream_res.stream_enc != NULL)
+ pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
+}
+
+static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+{
+ switch (crtc_id) {
+ case CONTROLLER_ID_D0:
+ return DTO_SOURCE_ID0;
+ case CONTROLLER_ID_D1:
+ return DTO_SOURCE_ID1;
+ case CONTROLLER_ID_D2:
+ return DTO_SOURCE_ID2;
+ case CONTROLLER_ID_D3:
+ return DTO_SOURCE_ID3;
+ case CONTROLLER_ID_D4:
+ return DTO_SOURCE_ID4;
+ case CONTROLLER_ID_D5:
+ return DTO_SOURCE_ID5;
+ default:
+ return DTO_SOURCE_UNKNOWN;
+ }
+}
+
+static void build_audio_output(
+ struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_output *audio_output)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ audio_output->engine_id = pipe_ctx->stream_res.stream_enc->id;
+
+ audio_output->signal = pipe_ctx->stream->signal;
+
+ /* audio_crtc_info */
+
+ audio_output->crtc_info.h_total =
+ stream->timing.h_total;
+
+ /*
+ * Audio packets are sent during the CRTC blanking interval, so we
+ * need to specify the actual active portion of the signal.
+ */
+ audio_output->crtc_info.h_active =
+ stream->timing.h_addressable
+ + stream->timing.h_border_left
+ + stream->timing.h_border_right;
+
+ audio_output->crtc_info.v_active =
+ stream->timing.v_addressable
+ + stream->timing.v_border_top
+ + stream->timing.v_border_bottom;
+
+ audio_output->crtc_info.pixel_repetition = 1;
+
+ audio_output->crtc_info.interlaced =
+ stream->timing.flags.INTERLACE;
+
+ audio_output->crtc_info.refresh_rate =
+ (stream->timing.pix_clk_khz*1000)/
+ (stream->timing.h_total*stream->timing.v_total);
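+ /* e.g. a 148500 kHz pixel clock with 2200 x 1125 total gives 148500000 / 2475000 = 60 Hz */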
+
+ audio_output->crtc_info.color_depth =
+ stream->timing.display_color_depth;
+
+ audio_output->crtc_info.requested_pixel_clock =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+ audio_output->crtc_info.calculated_pixel_clock =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+/* For HDMI, the audio ACR accounts for the deep color ratio factor. */
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) &&
+ audio_output->crtc_info.requested_pixel_clock ==
+ stream->timing.pix_clk_khz) {
+ if (pipe_ctx->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ audio_output->crtc_info.requested_pixel_clock =
+ audio_output->crtc_info.requested_pixel_clock/2;
+ audio_output->crtc_info.calculated_pixel_clock =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk/2;
+
+ }
+ }
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ audio_output->pll_info.dp_dto_source_clock_in_khz =
+ state->dis_clk->funcs->get_dp_ref_clk_frequency(
+ state->dis_clk);
+ }
+
+ audio_output->pll_info.feed_back_divider =
+ pipe_ctx->pll_settings.feedback_divider;
+
+ audio_output->pll_info.dto_source =
+ translate_to_dto_source(
+ pipe_ctx->pipe_idx + 1);
+
+ /* TODO hard code to enable for now. Need get from stream */
+ audio_output->pll_info.ss_enabled = true;
+
+ audio_output->pll_info.ss_percentage =
+ pipe_ctx->pll_settings.ss_percentage;
+}
+
+static void get_surface_visual_confirm_color(const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE * (4 - pipe_ctx->pipe_idx) / 4;
+
+ switch (pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB8888:
+ /* set border color to red */
+ color->color_r_cr = color_value;
+ break;
+
+ case PIXEL_FORMAT_ARGB2101010:
+ /* set border color to blue */
+ color->color_b_cb = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP8:
+ /* set border color to green */
+ color->color_g_y = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP10:
+ /* set border color to yellow */
+ color->color_g_y = color_value;
+ color->color_r_cr = color_value;
+ break;
+ case PIXEL_FORMAT_FP16:
+ /* set border color to white */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
+ color->color_g_y = color_value;
+ break;
+ default:
+ break;
+ }
+}
+
+static void program_scaler(const struct dc *dc,
+ const struct pipe_ctx *pipe_ctx)
+{
+ struct tg_color color = {0};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* TOFPGA */
+ if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
+ return;
+#endif
+
+ if (dc->debug.surface_visual_confirm)
+ get_surface_visual_confirm_color(pipe_ctx, &color);
+ else
+ color_space_to_black_color(dc,
+ pipe_ctx->stream->output_color_space,
+ &color);
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
+ pipe_ctx->plane_res.xfm,
+ pipe_ctx->plane_res.scl_data.lb_params.depth,
+ &pipe_ctx->stream->bit_depth_params);
+
+ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
+ pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
+ pipe_ctx->stream_res.tg,
+ &color);
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
+ &pipe_ctx->plane_res.scl_data);
+}
+
+static enum dc_status dce110_prog_pixclk_crtc_otg(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
+ pipe_ctx[pipe_ctx->pipe_idx];
+ struct tg_color black_color = {0};
+
+ if (!pipe_ctx_old->stream) {
+
+ /* program blank color */
+ color_space_to_black_color(dc,
+ stream->output_color_space, &black_color);
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+
+ /*
+ * The CRTC must be blanked after disabling power gating and before any
+ * programming; otherwise the CRTC will hang in a bad state.
+ */
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
+
+ if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ true);
+
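+ /* 0x182 = overlay update (0x100) + surface update (0x80) + cursor update (0x2); see set_static_screen_control() below. */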
+ pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
+ pipe_ctx->stream_res.tg,
+ 0x182);
+ }
+
+ if (!pipe_ctx_old->stream) {
+ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(
+ pipe_ctx->stream_res.tg)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+ }
+
+ return DC_OK;
+}
+
+static enum dc_status apply_single_controller_ctx_to_hw(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.
+ pipe_ctx[pipe_ctx->pipe_idx];
+
+ dc->hwss.prog_pixclk_crtc_otg(pipe_ctx, context, dc);
+
+ /* FPGA does not program backend */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ return DC_OK;
+ }
+ /* TODO: move to stream encoder */
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ if (DC_OK != bios_parser_crtc_source_select(pipe_ctx)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ pipe_ctx->stream->signal);
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ stream->sink->link->link_enc->funcs->setup(
+ stream->sink->link->link_enc,
+ pipe_ctx->stream->signal);
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.tg->inst,
+ stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
+
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ stream->output_color_space);
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ stream->phy_pix_clk,
+ pipe_ctx->stream_res.audio != NULL);
+
+ if (dc_is_dvi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ (pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
+ true : false);
+
+ resource_build_info_frame(pipe_ctx);
+ dce110_update_info_frame(pipe_ctx);
+ if (!pipe_ctx_old->stream) {
+ if (!pipe_ctx->stream->dpms_off)
+ core_link_enable_stream(context, pipe_ctx);
+ }
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+
+ pipe_ctx->stream->sink->link->psr_enabled = false;
+
+ return DC_OK;
+}
+
+/******************************************************************************/
+
+static void power_down_encoders(struct dc *dc)
+{
+ int i;
+ enum connector_id connector_id;
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+
+ /* We do not know the BIOS back-end/front-end mapping, so simply blank
+ * all stream encoders; this does not hurt non-DP outputs.
+ */
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ dc->res_pool->stream_enc[i]->funcs->dp_blank(
+ dc->res_pool->stream_enc[i]);
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id);
+ if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+ (connector_id == CONNECTOR_ID_EDP)) {
+
+ if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
+ dp_receiver_power_ctrl(dc->links[i], false);
+ if (connector_id == CONNECTOR_ID_EDP)
+ signal = SIGNAL_TYPE_EDP;
+ }
+
+ dc->links[i]->link_enc->funcs->disable_output(
+ dc->links[i]->link_enc, signal, dc->links[i]);
+ }
+}
+
+static void power_down_controllers(struct dc *dc)
+{
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ dc->res_pool->timing_generators[i]->funcs->disable_crtc(
+ dc->res_pool->timing_generators[i]);
+ }
+}
+
+static void power_down_clock_sources(struct dc *dc)
+{
+ int i;
+
+ if (dc->res_pool->dp_clock_source->funcs->cs_power_down(
+ dc->res_pool->dp_clock_source) == false)
+ dm_error("Failed to power down pll! (dp clk src)\n");
+
+ for (i = 0; i < dc->res_pool->clk_src_count; i++) {
+ if (dc->res_pool->clock_sources[i]->funcs->cs_power_down(
+ dc->res_pool->clock_sources[i]) == false)
+ dm_error("Failed to power down pll! (clk src index=%d)\n", i);
+ }
+}
+
+static void power_down_all_hw_blocks(struct dc *dc)
+{
+ power_down_encoders(dc);
+
+ power_down_controllers(dc);
+
+ power_down_clock_sources(dc);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+#endif
+}
+
+static void disable_vga_and_power_gate_all_controllers(
+ struct dc *dc)
+{
+ int i;
+ struct timing_generator *tg;
+ struct dc_context *ctx = dc->ctx;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->disable_vga)
+ tg->funcs->disable_vga(tg);
+
+ /* Enable CLOCK gating for each pipe BEFORE controller
+ * powergating. */
+ enable_display_pipe_clock_gating(ctx,
+ true);
+
+ dc->hwss.power_down_front_end(dc, i);
+ }
+}
+
+/**
+ * When the ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need to:
+ * 1. Power down all DC HW blocks
+ * 2. Disable VGA engine on all controllers
+ * 3. Enable power gating for controller
+ * 4. Set acc_mode_change bit (VBIOS will clear this bit when going to FSDOS)
+ */
+void dce110_enable_accelerated_mode(struct dc *dc)
+{
+ power_down_all_hw_blocks(dc);
+
+ disable_vga_and_power_gate_all_controllers(dc);
+ bios_set_scratch_acc_mode_change(dc->ctx->dc_bios);
+}
+
+static uint32_t compute_pstate_blackout_duration(
+ struct bw_fixed blackout_duration,
+ const struct dc_stream_state *stream)
+{
+ uint32_t total_dest_line_time_ns;
+ uint32_t pstate_blackout_duration_ns;
+
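+ /* Total destination line time = one line period in ns plus the p-state blackout duration. */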
+ pstate_blackout_duration_ns = 1000 * blackout_duration.value >> 24;
+
+ total_dest_line_time_ns = 1000000UL *
+ stream->timing.h_total /
+ stream->timing.pix_clk_khz +
+ pstate_blackout_duration_ns;
+
+ return total_dest_line_time_ns;
+}
+
+void dce110_set_displaymarks(
+ const struct dc *dc,
+ struct dc_state *context)
+{
+ uint8_t i, num_pipes;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
+
+ for (i = 0, num_pipes = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ uint32_t total_dest_line_time_ns;
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ total_dest_line_time_ns = compute_pstate_blackout_duration(
+ dc->bw_vbios->blackout_duration, pipe_ctx->stream);
+ pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks(
+ pipe_ctx->plane_res.mi,
+ context->bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw.dce.urgent_wm_ns[num_pipes],
+ total_dest_line_time_ns);
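+ /* The underlay pipe uses an additional set of watermarks for its chroma plane. */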
+ if (i == underlay_idx) {
+ num_pipes++;
+ pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks(
+ pipe_ctx->plane_res.mi,
+ context->bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw.dce.urgent_wm_ns[num_pipes],
+ total_dest_line_time_ns);
+ }
+ num_pipes++;
+ }
+}
+
+static void set_safe_displaymarks(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool)
+{
+ int i;
+ int underlay_idx = pool->underlay_pipe_index;
+ struct dce_watermarks max_marks = {
+ MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK };
+ struct dce_watermarks nbp_marks = {
+ SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK };
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream == NULL || res_ctx->pipe_ctx[i].plane_res.mi == NULL)
+ continue;
+
+ res_ctx->pipe_ctx[i].plane_res.mi->funcs->mem_input_program_display_marks(
+ res_ctx->pipe_ctx[i].plane_res.mi,
+ nbp_marks,
+ max_marks,
+ max_marks,
+ MAX_WATERMARK);
+
+ if (i == underlay_idx)
+ res_ctx->pipe_ctx[i].plane_res.mi->funcs->mem_input_program_chroma_display_marks(
+ res_ctx->pipe_ctx[i].plane_res.mi,
+ nbp_marks,
+ max_marks,
+ max_marks,
+ MAX_WATERMARK);
+
+ }
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+static void set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, int vmin, int vmax)
+{
+ int i = 0;
+ struct drr_params params = {0};
+
+ params.vertical_total_max = vmax;
+ params.vertical_total_min = vmin;
+
+ /* TODO: If multiple pipes are to be supported, you need
+ * some GSL stuff
+ */
+
+ for (i = 0; i < num_pipes; i++) {
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(pipe_ctx[i]->stream_res.tg, &params);
+ }
+}
+
+static void get_position(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ struct crtc_position *position)
+{
+ int i = 0;
+
+ /* TODO: handle pipes > 1
+ */
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
+}
+
+static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events)
+{
+ unsigned int i;
+ unsigned int value = 0;
+
+ if (events->overlay_update)
+ value |= 0x100;
+ if (events->surface_update)
+ value |= 0x80;
+ if (events->cursor_update)
+ value |= 0x2;
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ value |= 0x84;
+#endif
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+}
+
+/* Unit: kHz. Before mode set, get the pixel clock from the context, since the
+ * ASIC registers may not be programmed yet.
+ * TODO: after mode set (pre_mode_set == false), the PLL registers could be
+ * read to get the pixel clock.
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(
+ struct dc *dc,
+ struct dc_state *context,
+ bool pre_mode_set)
+{
+ uint32_t max_pix_clk = 0;
+ int i;
+
+ if (!pre_mode_set) {
+ /* TODO: read ASIC register to get pixel clock */
+ ASSERT(0);
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ /* do not check the underlay */
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+ max_pix_clk =
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+ }
+
+ if (max_pix_clk == 0)
+ ASSERT(0);
+
+ return max_pix_clk;
+}
+
+/*
+ * Find the clock state based on the requested clocks. If the clock value is 0,
+ * simply set the clock state as requested without deriving it from a clock value.
+ */
+
+static void apply_min_clocks(
+ struct dc *dc,
+ struct dc_state *context,
+ enum dm_pp_clocks_state *clocks_state,
+ bool pre_mode_set)
+{
+ struct state_dependent_clocks req_clocks = {0};
+
+ if (!pre_mode_set) {
+ /* set clock_state without verification */
+ if (context->dis_clk->funcs->set_min_clocks_state) {
+ context->dis_clk->funcs->set_min_clocks_state(
+ context->dis_clk, *clocks_state);
+ return;
+ }
+
+ /* TODO: This is incorrect. Figure out how to fix. */
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ context->dis_clk->cur_clocks_value.dispclk_in_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_PIXELCLK,
+ context->dis_clk->cur_clocks_value.max_pixelclk_in_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ context->dis_clk->cur_clocks_value.max_non_dp_phyclk_in_khz,
+ pre_mode_set,
+ false);
+ return;
+ }
+
+ /* get the required state based on state dependent clocks:
+ * display clock and pixel clock
+ */
+ req_clocks.display_clk_khz = context->bw.dce.dispclk_khz;
+
+ req_clocks.pixel_clk_khz = get_max_pixel_clock_for_all_paths(
+ dc, context, true);
+
+ if (context->dis_clk->funcs->get_required_clocks_state) {
+ *clocks_state = context->dis_clk->funcs->get_required_clocks_state(
+ context->dis_clk, &req_clocks);
+ context->dis_clk->funcs->set_min_clocks_state(
+ context->dis_clk, *clocks_state);
+ } else {
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ req_clocks.display_clk_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_PIXELCLK,
+ req_clocks.pixel_clk_khz,
+ pre_mode_set,
+ false);
+
+ context->dis_clk->funcs->apply_clock_voltage_request(
+ context->dis_clk,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ req_clocks.pixel_clk_khz,
+ pre_mode_set,
+ false);
+ }
+}
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+
+/*
+ * Check if FBC can be enabled
+ */
+static enum dc_status validate_fbc(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[0];
+
+ ASSERT(dc->fbc_compressor);
+
+ /* FBC memory should be allocated */
+ if (!dc->ctx->fbc_gpu_addr)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Only supports single display */
+ if (context->stream_count != 1)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Only supports eDP */
+ if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
+ return DC_ERROR_UNEXPECTED;
+
+ /* PSR should not be enabled */
+ if (pipe_ctx->stream->sink->link->psr_enabled)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Nothing to compress */
+ if (!pipe_ctx->plane_state)
+ return DC_ERROR_UNEXPECTED;
+
+ /* Only for non-linear tiling */
+ if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
+ return DC_ERROR_UNEXPECTED;
+
+ return DC_OK;
+}
+
+/*
+ * Enable FBC
+ */
+static enum dc_status enable_fbc(struct dc *dc,
+ struct dc_state *context)
+{
+ enum dc_status status = validate_fbc(dc, context);
+
+ if (status == DC_OK) {
+ /* Program GRPH COMPRESSED ADDRESS and PITCH */
+ struct compr_addr_and_pitch_params params = {0, 0, 0};
+ struct compressor *compr = dc->fbc_compressor;
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[0];
+
+ params.source_view_width =
+ pipe_ctx->stream->timing.h_addressable;
+ params.source_view_height =
+ pipe_ctx->stream->timing.v_addressable;
+
+ compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
+
+ compr->funcs->surface_address_and_pitch(compr, &params);
+ compr->funcs->set_fbc_invalidation_triggers(compr, 1);
+
+ compr->funcs->enable_fbc(compr, &params);
+ }
+ return status;
+}
+#endif
+
+static enum dc_status apply_ctx_to_hw_fpga(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream)
+ continue;
+
+ status = apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ if (status != DC_OK)
+ return status;
+ }
+
+ return DC_OK;
+}
+
+static void dce110_reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /* Note: We need to disable the output if the clock source changes,
+ * since the BIOS optimizes the call and does not reprogram the
+ * PHY unless it is already disabled.
+ */
+
+ /* Skip underlay pipe since it will be handled in commit surface*/
+ if (!pipe_ctx_old->stream || pipe_ctx_old->top_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ /* Disable if the new stream is null; otherwise, if the stream is
+ * already disabled, there is no need to disable it again.
+ */
+ if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
+ core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
+
+ pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
+ if (!hwss_wait_for_blank_complete(pipe_ctx_old->stream_res.tg)) {
+ dm_error("DC: failed to blank crtc!\n");
+ BREAK_TO_DEBUGGER();
+ }
+ pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg);
+ pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
+ pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
+
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+
+ dc->hwss.power_down_front_end(dc, pipe_ctx_old->pipe_idx);
+
+ pipe_ctx_old->stream = NULL;
+ }
+ }
+}
+
+
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status status;
+ int i;
+ enum dm_pp_clocks_state clocks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ /* Reset old context */
+ /* look up the targets that have been removed since last commit */
+ dc->hwss.reset_hw_ctx_wrap(dc, context);
+
+ /* Skip applying if no targets */
+ if (context->stream_count <= 0)
+ return DC_OK;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ apply_ctx_to_hw_fpga(dc, context);
+ return DC_OK;
+ }
+
+ /* Apply new context */
+ dcb->funcs->set_scratch_critical_state(dcb, true);
+
+ /* below is for real asic only */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL || pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream) {
+ if (pipe_ctx_old->clock_source != pipe_ctx->clock_source)
+ dce_crtc_switch_to_clk_src(dc->hwseq,
+ pipe_ctx->clock_source, i);
+ continue;
+ }
+
+ dc->hwss.enable_display_power_gating(
+ dc, i, dc->ctx->dc_bios,
+ PIPE_GATING_CONTROL_DISABLE);
+ }
+
+ set_safe_displaymarks(&context->res_ctx, dc->res_pool);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+#endif
+ /*TODO: when pplib works*/
+ apply_min_clocks(dc, context, &clocks_state, true);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+ if (context->bw.dcn.calc_clk.fclk_khz
+ > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
+ struct dm_pp_clock_for_voltage_req clock;
+
+ clock.clk_type = DM_PP_CLOCK_TYPE_FCLK;
+ clock.clocks_in_khz = context->bw.dcn.calc_clk.fclk_khz;
+ dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+ dc->current_state->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+ context->bw.dcn.cur_clk.fclk_khz = clock.clocks_in_khz;
+ }
+ if (context->bw.dcn.calc_clk.dcfclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+ struct dm_pp_clock_for_voltage_req clock;
+
+ clock.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
+ clock.clocks_in_khz = context->bw.dcn.calc_clk.dcfclk_khz;
+ dm_pp_apply_clock_for_voltage_request(dc->ctx, &clock);
+ dc->current_state->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+ context->bw.dcn.cur_clk.dcfclk_khz = clock.clocks_in_khz;
+ }
+ if (context->bw.dcn.calc_clk.dispclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ dc->current_state->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ context->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ }
+ } else
+#endif
+ if (context->bw.dce.dispclk_khz
+ > dc->current_state->bw.dce.dispclk_khz) {
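+ /* Program dispclk with a 15% safety margin (x 115 / 100). */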
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ }
+ /* Program the audio wall clock. Use HDMI as the clock source if HDMI
+ * audio is active; otherwise use DP as the clock source.
+ * First loop to find any HDMI audio; if none is found, loop to find DP audio.
+ */
+ /* Setup audio rate clock source */
+ /* Issue:
+ * Audio lag was observed on a DP monitor when unplugging an HDMI monitor.
+ *
+ * Cause:
+ * With DP and HDMI connected, or HDMI only, DCCG_AUDIO_DTO_SEL
+ * is set to either dto0 or dto1 and audio works fine.
+ * With only DP connected, DCCG_AUDIO_DTO_SEL should be dto1;
+ * setting it to dto0 causes audio lag.
+ *
+ * Solution:
+ * The audio wall DTO setup is not optimized per pipe. At mode set,
+ * iterate over pipe_ctx, find the first available pipe with audio, and
+ * set up the audio wall DTO per topology instead of per pipe.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A)
+ continue;
+
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &audio_output.pll_info);
+ break;
+ }
+ }
+
+ /* no HDMI audio is found, try DP audio */
+ if (i == dc->res_pool->pipe_count) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (!dc_is_dp_signal(pipe_ctx->stream->signal))
+ continue;
+
+ if (pipe_ctx->stream_res.audio != NULL) {
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ pipe_ctx->stream_res.audio->funcs->wall_dto_setup(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &audio_output.pll_info);
+ break;
+ }
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx->stream && pipe_ctx_old->stream
+ && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (context->res_ctx.pipe_ctx[i].stream_res.audio != NULL) {
+
+ struct audio_output audio_output;
+
+ build_audio_output(context, pipe_ctx, &audio_output);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info,
+ &audio_output.crtc_info);
+
+ pipe_ctx->stream_res.audio->funcs->az_configure(
+ pipe_ctx->stream_res.audio,
+ pipe_ctx->stream->signal,
+ &audio_output.crtc_info,
+ &pipe_ctx->stream->audio_info);
+ }
+
+ status = apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ if (dc->hwss.power_on_front_end)
+ dc->hwss.power_on_front_end(dc, pipe_ctx, context);
+
+ if (DC_OK != status)
+ return status;
+ }
+
+	/* pplib is notified if the display count changed */
+ dc->hwss.set_bandwidth(dc, context, true);
+
+ /* to save power */
+ apply_min_clocks(dc, context, &clocks_state, false);
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ enable_fbc(dc, context);
+
+#endif
+
+ return DC_OK;
+}
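+
+/*
+ * Illustrative, standalone sketch (not part of this patch): the two-pass
+ * audio wall-clock source search performed in dce110_apply_ctx_to_hw()
+ * above -- prefer an HDMI audio pipe and fall back to a DP audio pipe.
+ * The types and names here (enum sketch_signal, struct sketch_pipe,
+ * pick_audio_clock_pipe) are invented for the example and are not DC API.
+ */
+#include <stdbool.h>
+#include <stdio.h>
+
+enum sketch_signal { SKETCH_SIGNAL_NONE, SKETCH_SIGNAL_HDMI, SKETCH_SIGNAL_DP };
+
+struct sketch_pipe {
+	enum sketch_signal signal;
+	bool has_audio;
+};
+
+/* Return the index of the pipe whose audio should drive the wall DTO,
+ * or -1 if no pipe carries audio. */
+static int pick_audio_clock_pipe(const struct sketch_pipe *pipes, int count)
+{
+	int i;
+
+	/* First pass: any HDMI pipe with audio wins. */
+	for (i = 0; i < count; i++)
+		if (pipes[i].signal == SKETCH_SIGNAL_HDMI && pipes[i].has_audio)
+			return i;
+
+	/* Second pass: otherwise take the first DP pipe with audio. */
+	for (i = 0; i < count; i++)
+		if (pipes[i].signal == SKETCH_SIGNAL_DP && pipes[i].has_audio)
+			return i;
+
+	return -1;
+}
+
+int main(void)
+{
+	struct sketch_pipe pipes[] = {
+		{ SKETCH_SIGNAL_DP, true },
+		{ SKETCH_SIGNAL_HDMI, true },
+	};
+
+	/* Prints 1: the HDMI pipe wins even though the DP pipe comes first. */
+	printf("%d\n", pick_audio_clock_pipe(pipes, 2));
+	return 0;
+}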
+
+/*******************************************************************************
+ * Front End programming
+ ******************************************************************************/
+static void set_default_colors(struct pipe_ctx *pipe_ctx)
+{
+ struct default_adjustment default_adjust = { 0 };
+
+ default_adjust.force_hw_default = false;
+ if (pipe_ctx->plane_state == NULL)
+ default_adjust.in_color_space = COLOR_SPACE_SRGB;
+ else
+ default_adjust.in_color_space =
+ pipe_ctx->plane_state->color_space;
+ if (pipe_ctx->stream == NULL)
+ default_adjust.out_color_space = COLOR_SPACE_SRGB;
+ else
+ default_adjust.out_color_space =
+ pipe_ctx->stream->output_color_space;
+ default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
+ default_adjust.surface_pixel_format = pipe_ctx->plane_res.scl_data.format;
+
+ /* display color depth */
+ default_adjust.color_depth =
+ pipe_ctx->stream->timing.display_color_depth;
+
+ /* Lb color depth */
+ default_adjust.lb_color_depth = pipe_ctx->plane_res.scl_data.lb_params.depth;
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_default(
+ pipe_ctx->plane_res.xfm, &default_adjust);
+}
+
+
+/*******************************************************************************
+ * To turn a specific surface on or off we program the Blender + CRTC.
+ *
+ * If there are two surfaces with different visibility we cannot simply turn
+ * the CRTC off, since that would blank the entire display.
+ *
+ * |------------------------------------------------|
+ * |bottom pipe|curr pipe  |              |         |
+ * |Surface    |Surface    | Blender      | CRTC    |
+ * |visibility |visibility | Configuration|         |
+ * |------------------------------------------------|
+ * |   off     |   off     | CURRENT_PIPE | blank   |
+ * |   off     |   on      | CURRENT_PIPE | unblank |
+ * |   on      |   off     | OTHER_PIPE   | unblank |
+ * |   on      |   on      | BLENDING     | unblank |
+ * |------------------------------------------------|
+ *
+ * (An illustrative sketch of this decision table follows the function below.)
+ ******************************************************************************/
+static void program_surface_visibility(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum blnd_mode blender_mode = BLND_MODE_CURRENT_PIPE;
+ bool blank_target = false;
+
+ if (pipe_ctx->bottom_pipe) {
+
+ /* For now we are supporting only two pipes */
+ ASSERT(pipe_ctx->bottom_pipe->bottom_pipe == NULL);
+
+ if (pipe_ctx->bottom_pipe->plane_state->visible) {
+ if (pipe_ctx->plane_state->visible)
+ blender_mode = BLND_MODE_BLENDING;
+ else
+ blender_mode = BLND_MODE_OTHER_PIPE;
+
+ } else if (!pipe_ctx->plane_state->visible)
+ blank_target = true;
+
+ } else if (!pipe_ctx->plane_state->visible)
+ blank_target = true;
+
+ dce_set_blender_mode(dc->hwseq, pipe_ctx->pipe_idx, blender_mode);
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, blank_target);
+
+}
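+
+/*
+ * Illustrative, standalone sketch (not part of this patch): the decision
+ * table above expressed as a pure function.  The blnd mode values mirror
+ * the driver enum by name only; sketch_decide_blend() and its bool
+ * parameters are invented for the example.
+ */
+#include <stdbool.h>
+#include <stdio.h>
+
+enum sketch_blnd_mode { BLND_CURRENT_PIPE, BLND_OTHER_PIPE, BLND_BLENDING };
+
+struct sketch_blend_result {
+	enum sketch_blnd_mode mode;
+	bool blank_crtc;
+};
+
+static struct sketch_blend_result sketch_decide_blend(bool has_bottom_pipe,
+		bool bottom_visible, bool current_visible)
+{
+	struct sketch_blend_result r = { BLND_CURRENT_PIPE, false };
+
+	if (has_bottom_pipe && bottom_visible)
+		/* Bottom surface is visible: blend or show only the other pipe. */
+		r.mode = current_visible ? BLND_BLENDING : BLND_OTHER_PIPE;
+	else if (!current_visible)
+		/* Nothing visible on this pipe at all: blank the CRTC. */
+		r.blank_crtc = true;
+
+	return r;
+}
+
+int main(void)
+{
+	/* off/off -> CURRENT_PIPE + blank, matching the first table row. */
+	struct sketch_blend_result r = sketch_decide_blend(true, false, false);
+
+	printf("mode=%d blank=%d\n", r.mode, r.blank_crtc);
+	return 0;
+}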
+
+static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
+{
+ struct xfm_grph_csc_adjustment adjust;
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+}
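+
+/*
+ * Illustrative, standalone sketch (not part of this patch): why the copy in
+ * program_gamut_remap() above uses source indices 0-2, 4-6 and 8-10.  The
+ * stream's 12-entry remap matrix appears to be laid out as a row-major 3x4
+ * (the fourth entry of each row, presumably an offset term, is skipped),
+ * while the transform block only takes the 3x3 coefficients.  extract_3x3()
+ * and the integer values are invented for the example.
+ */
+#include <stdio.h>
+
+static void extract_3x3(const int src3x4[12], int dst3x3[9])
+{
+	int row, col;
+
+	for (row = 0; row < 3; row++)
+		for (col = 0; col < 3; col++)
+			/* Row stride is 4 in the source, 3 in the destination. */
+			dst3x3[row * 3 + col] = src3x4[row * 4 + col];
+}
+
+int main(void)
+{
+	const int src[12] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
+	int dst[9];
+	int i;
+
+	extract_3x3(src, dst);
+	for (i = 0; i < 9; i++)
+		printf("%d ", dst[i]);	/* 0 1 2 4 5 6 8 9 10 */
+	printf("\n");
+	return 0;
+}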
+
+/**
+ * TODO REMOVE, USE UPDATE INSTEAD
+ */
+static void set_plane_config(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct resource_context *res_ctx)
+{
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct xfm_grph_csc_adjustment adjust;
+ struct out_csc_color_matrix tbl_entry;
+ unsigned int i;
+
+ memset(&adjust, 0, sizeof(adjust));
+ memset(&tbl_entry, 0, sizeof(tbl_entry));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+ dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
+
+ set_default_colors(pipe_ctx);
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ tbl_entry.color_space =
+ pipe_ctx->stream->output_color_space;
+
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] =
+ pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
+ (pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+ program_scaler(dc, pipe_ctx);
+
+ program_surface_visibility(dc, pipe_ctx);
+
+ mi->funcs->mem_input_program_surface_config(
+ mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &plane_state->plane_size,
+ plane_state->rotation,
+ NULL,
+ false);
+ if (mi->funcs->set_blank)
+ mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
+
+ if (dc->config.gpu_vm_support)
+ mi->funcs->mem_input_program_pte_vm(
+ pipe_ctx->plane_res.mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation);
+}
+
+static void update_plane_addr(const struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+
+ pipe_ctx->plane_res.mi->funcs->mem_input_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.mi,
+ &plane_state->address,
+ plane_state->flip_immediate);
+
+ plane_state->status.requested_address = plane_state->address;
+}
+
+void dce110_update_pending_status(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+
+ plane_state->status.is_flip_pending =
+ pipe_ctx->plane_res.mi->funcs->mem_input_is_flip_pending(
+ pipe_ctx->plane_res.mi);
+
+ if (plane_state->status.is_flip_pending && !plane_state->visible)
+ pipe_ctx->plane_res.mi->current_address = pipe_ctx->plane_res.mi->request_address;
+
+ plane_state->status.current_address = pipe_ctx->plane_res.mi->current_address;
+ if (pipe_ctx->plane_res.mi->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ pipe_ctx->stream_res.tg->funcs->is_stereo_left_eye) {
+		plane_state->status.is_right_eye =
+ !pipe_ctx->stream_res.tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
+ }
+}
+
+void dce110_power_down(struct dc *dc)
+{
+ power_down_all_hw_blocks(dc);
+ disable_vga_and_power_gate_all_controllers(dc);
+}
+
+static bool wait_for_reset_trigger_to_occur(
+ struct dc_context *dc_ctx,
+ struct timing_generator *tg)
+{
+ bool rc = false;
+
+	/* To avoid an endless loop we wait at most
+	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
+ const uint32_t frames_to_wait_on_triggered_reset = 10;
+ uint32_t i;
+
+ for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
+
+ if (!tg->funcs->is_counter_moving(tg)) {
+ DC_ERROR("TG counter is not moving!\n");
+ break;
+ }
+
+ if (tg->funcs->did_triggered_reset_occur(tg)) {
+ rc = true;
+ /* usually occurs at i=1 */
+ DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
+ i);
+ break;
+ }
+
+ /* Wait for one frame. */
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
+ }
+
+ if (false == rc)
+ DC_ERROR("GSL: Timeout on reset trigger!\n");
+
+ return rc;
+}
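+
+/*
+ * Illustrative, standalone sketch (not part of this patch): the bounded
+ * polling pattern used above -- check a sanity condition, test for the
+ * event, then wait one "frame", giving up after a fixed number of tries.
+ * The callback types, sketch_poll() and the demo callbacks are invented
+ * for the example.
+ */
+#include <stdbool.h>
+#include <stdio.h>
+
+typedef bool (*sketch_cond_fn)(void *arg);
+typedef void (*sketch_wait_fn)(void *arg);
+
+static bool sketch_poll(sketch_cond_fn still_sane, sketch_cond_fn done,
+			sketch_wait_fn wait_one_frame, void *arg,
+			unsigned int max_tries)
+{
+	unsigned int i;
+
+	for (i = 0; i < max_tries; i++) {
+		if (!still_sane(arg))
+			return false;	/* e.g. TG counter stopped moving */
+		if (done(arg))
+			return true;	/* e.g. triggered reset observed */
+		wait_one_frame(arg);	/* e.g. wait VACTIVE then VBLANK */
+	}
+	return false;			/* timed out */
+}
+
+static bool always_sane(void *arg) { (void)arg; return true; }
+static bool done_after_two(void *arg) { return --*(int *)arg <= 0; }
+static void no_wait(void *arg) { (void)arg; }
+
+int main(void)
+{
+	int countdown = 2;
+
+	/* Prints 1: the event "occurs" on the second iteration. */
+	printf("%d\n", sketch_poll(always_sane, done_after_two, no_wait,
+				   &countdown, 10));
+	return 0;
+}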
+
+/* Enable timing synchronization for a group of Timing Generators. */
+static void dce110_enable_timing_synchronization(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[])
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dcp_gsl_params gsl_params = { 0 };
+ int i;
+
+ DC_SYNC_INFO("GSL: Setting-up...\n");
+
+ /* Designate a single TG in the group as a master.
+ * Since HW doesn't care which one, we always assign
+ * the 1st one in the group. */
+ gsl_params.gsl_group = 0;
+ gsl_params.gsl_master = grouped_pipes[0]->stream_res.tg->inst;
+
+ for (i = 0; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock(
+ grouped_pipes[i]->stream_res.tg, &gsl_params);
+
+ /* Reset slave controllers on master VSync */
+ DC_SYNC_INFO("GSL: enabling trigger-reset\n");
+
+ for (i = 1 /* skip the master */; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg, gsl_params.gsl_group);
+
+
+
+ for (i = 1 /* skip the master */; i < group_size; i++) {
+ DC_SYNC_INFO("GSL: waiting for reset to occur.\n");
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
+ /* Regardless of success of the wait above, remove the reset or
+ * the driver will start timing out on Display requests. */
+ DC_SYNC_INFO("GSL: disabling trigger-reset.\n");
+ grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(grouped_pipes[i]->stream_res.tg);
+ }
+
+
+	/* GSL VBlank synchronization is a one-time sync mechanism; the
+	 * assumption is that the synchronized displays will not drift out of
+	 * sync over time. */
+ DC_SYNC_INFO("GSL: Restoring register states.\n");
+ for (i = 0; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->tear_down_global_swap_lock(grouped_pipes[i]->stream_res.tg);
+
+ DC_SYNC_INFO("GSL: Set-up complete.\n");
+}
+
+static void init_hw(struct dc *dc)
+{
+ int i;
+ struct dc_bios *bp;
+ struct transform *xfm;
+ struct abm *abm;
+
+ bp = dc->ctx->dc_bios;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ xfm = dc->res_pool->transforms[i];
+ xfm->funcs->transform_reset(xfm);
+
+ dc->hwss.enable_display_power_gating(
+ dc, i, bp,
+ PIPE_GATING_CONTROL_INIT);
+ dc->hwss.enable_display_power_gating(
+ dc, i, bp,
+ PIPE_GATING_CONTROL_DISABLE);
+ dc->hwss.enable_display_pipe_clock_gating(
+ dc->ctx,
+ true);
+ }
+
+ dce_clock_gating_power_up(dc->hwseq, false);
+ /***************************************/
+
+ for (i = 0; i < dc->link_count; i++) {
+ /****************************************/
+		/* Power up AND update implementation according to the
+		 * required signal (which may be different from the
+		 * default signal on the connector). */
+ struct dc_link *link = dc->links[i];
+ link->link_enc->funcs->hw_init(link->link_enc);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ tg->funcs->disable_vga(tg);
+
+ /* Blank controller using driver code instead of
+ * command table. */
+ tg->funcs->set_blank(tg, true);
+ hwss_wait_for_blank_complete(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->audio_count; i++) {
+ struct audio *audio = dc->res_pool->audios[i];
+ audio->funcs->hw_init(audio);
+ }
+
+ abm = dc->res_pool->abm;
+ if (abm != NULL) {
+ abm->funcs->init_backlight(abm);
+ abm->funcs->abm_init(abm);
+ }
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->power_up_fbc(dc->fbc_compressor);
+#endif
+
+}
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg)
+{
+ int j;
+ int num_cfgs = 0;
+
+ for (j = 0; j < context->stream_count; j++) {
+ int k;
+
+ const struct dc_stream_state *stream = context->streams[j];
+ struct dm_pp_single_disp_config *cfg =
+ &pp_display_cfg->disp_configs[num_cfgs];
+ const struct pipe_ctx *pipe_ctx = NULL;
+
+ for (k = 0; k < MAX_PIPES; k++)
+ if (stream == context->res_ctx.pipe_ctx[k].stream) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[k];
+ break;
+ }
+
+ ASSERT(pipe_ctx != NULL);
+
+ num_cfgs++;
+ cfg->signal = pipe_ctx->stream->signal;
+ cfg->pipe_idx = pipe_ctx->pipe_idx;
+ cfg->src_height = stream->src.height;
+ cfg->src_width = stream->src.width;
+ cfg->ddi_channel_mapping =
+ stream->sink->link->ddi_channel_mapping.raw;
+ cfg->transmitter =
+ stream->sink->link->link_enc->transmitter;
+ cfg->link_settings.lane_count =
+ stream->sink->link->cur_link_settings.lane_count;
+ cfg->link_settings.link_rate =
+ stream->sink->link->cur_link_settings.link_rate;
+ cfg->link_settings.link_spread =
+ stream->sink->link->cur_link_settings.link_spread;
+ cfg->sym_clock = stream->phy_pix_clk;
+		/* Compute v_refresh, rounded to the nearest Hz */
+ cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+ cfg->v_refresh /= stream->timing.h_total;
+ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+ / stream->timing.v_total;
+ }
+
+ pp_display_cfg->display_count = num_cfgs;
+}
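+
+/*
+ * Illustrative, standalone sketch (not part of this patch): the rounded
+ * refresh-rate computation above.  pix_clk is in kHz, so multiplying by
+ * 1000 gives Hz; dividing by h_total gives the line rate, and adding
+ * v_total/2 before the final division rounds to the nearest integer
+ * instead of truncating.  refresh_hz() is invented for the example.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+static uint32_t refresh_hz(uint32_t pix_clk_khz, uint32_t h_total,
+			   uint32_t v_total)
+{
+	uint32_t line_rate = pix_clk_khz * 1000 / h_total;
+
+	return (line_rate + v_total / 2) / v_total;
+}
+
+int main(void)
+{
+	/* 148.5 MHz pixel clock, 2200 x 1125 total (1080p timing): prints 60. */
+	printf("%u\n", refresh_hz(148500, 2200, 1125));
+	return 0;
+}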
+
+uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+{
+ uint8_t j;
+ uint32_t min_vertical_blank_time = -1;
+
+ for (j = 0; j < context->stream_count; j++) {
+ struct dc_stream_state *stream = context->streams[j];
+ uint32_t vertical_blank_in_pixels = 0;
+ uint32_t vertical_blank_time = 0;
+
+ vertical_blank_in_pixels = stream->timing.h_total *
+ (stream->timing.v_total
+ - stream->timing.v_addressable);
+
+ vertical_blank_time = vertical_blank_in_pixels
+ * 1000 / stream->timing.pix_clk_khz;
+
+ if (min_vertical_blank_time > vertical_blank_time)
+ min_vertical_blank_time = vertical_blank_time;
+ }
+
+ return min_vertical_blank_time;
+}
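+
+/*
+ * Illustrative, standalone sketch (not part of this patch): the unit math
+ * in dce110_get_min_vblank_time_us() above.  The blanking interval is
+ * (v_total - v_addressable) lines of h_total pixels each; dividing pixels
+ * by a clock in kHz gives milliseconds, so the extra factor of 1000 turns
+ * the result into microseconds.  vblank_time_us() is invented for the
+ * example.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+static uint32_t vblank_time_us(uint32_t h_total, uint32_t v_total,
+			       uint32_t v_addressable, uint32_t pix_clk_khz)
+{
+	uint32_t vblank_pixels = h_total * (v_total - v_addressable);
+
+	return vblank_pixels * 1000 / pix_clk_khz;
+}
+
+int main(void)
+{
+	/* 1080p timing: 2200 x (1125 - 1080) pixels at 148.5 MHz -> 666 us. */
+	printf("%u us\n", vblank_time_us(2200, 1125, 1080, 148500));
+	return 0;
+}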
+
+static int determine_sclk_from_bounding_box(
+ const struct dc *dc,
+ int required_sclk)
+{
+ int i;
+
+ /*
+ * Some asics do not give us sclk levels, so we just report the actual
+ * required sclk
+ */
+ if (dc->sclk_lvls.num_levels == 0)
+ return required_sclk;
+
+ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+ return dc->sclk_lvls.clocks_in_khz[i];
+ }
+	/*
+	 * Even the maximum level could not satisfy the requirement; this is
+	 * unexpected at this stage and should have been caught at validation
+	 * time.
+	 */
+ ASSERT(0);
+ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
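+
+/*
+ * Illustrative, standalone sketch (not part of this patch): the bounding-box
+ * lookup above -- snap a required clock up to the smallest supported level
+ * that satisfies it, fall back to the raw request when no level table is
+ * available, and clamp to the top level otherwise.  The names
+ * (struct sketch_levels, snap_to_level) are invented for the example.
+ */
+#include <stdio.h>
+
+struct sketch_levels {
+	int num_levels;
+	int clocks_in_khz[8];
+};
+
+static int snap_to_level(const struct sketch_levels *lvls, int required_khz)
+{
+	int i;
+
+	if (lvls->num_levels == 0)
+		return required_khz;	/* no table: report the raw requirement */
+
+	for (i = 0; i < lvls->num_levels; i++)
+		if (lvls->clocks_in_khz[i] >= required_khz)
+			return lvls->clocks_in_khz[i];
+
+	/* Nothing is high enough: clamp to the top level. */
+	return lvls->clocks_in_khz[lvls->num_levels - 1];
+}
+
+int main(void)
+{
+	struct sketch_levels lvls = { 3, { 300000, 600000, 1000000 } };
+
+	printf("%d\n", snap_to_level(&lvls, 450000));	/* prints 600000 */
+	return 0;
+}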
+
+static void pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->all_displays_in_sync =
+ context->bw.dce.all_displays_in_sync;
+ pp_display_cfg->nb_pstate_switch_disable =
+ context->bw.dce.nbp_state_change_enable == false;
+ pp_display_cfg->cpu_cc6_disable =
+ context->bw.dce.cpuc_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_disable =
+ context->bw.dce.cpup_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_separation_time =
+ context->bw.dce.blackout_recovery_time_us;
+
+ pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ / MEMORY_TYPE_MULTIPLIER;
+
+ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+ dc,
+ context->bw.dce.sclk_khz);
+
+ pp_display_cfg->min_engine_clock_deep_sleep_khz
+ = context->bw.dce.sclk_deep_sleep_khz;
+
+ pp_display_cfg->avail_mclk_switch_time_us =
+ dce110_get_min_vblank_time_us(context);
+ /* TODO: dce11.2*/
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+ pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz;
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ /* TODO: is this still applicable?*/
+ if (pp_display_cfg->display_count == 1) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index =
+ pp_display_cfg->disp_configs[0].pipe_idx;
+ pp_display_cfg->line_time_in_us = timing->h_total * 1000
+ / timing->pix_clk_khz;
+ }
+
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+ struct dm_pp_display_configuration)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+ dc->prev_display_config = *pp_display_cfg;
+}
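+
+/*
+ * Illustrative, standalone sketch (not part of this patch): the
+ * compare-before-notify pattern above.  The new configuration is only
+ * pushed out when it differs from the cached previous copy, and the cache
+ * is refreshed afterwards either way.  struct sketch_cfg and
+ * apply_if_changed() are invented for the example.
+ */
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+struct sketch_cfg {
+	int display_count;
+	int disp_clk_khz;
+};
+
+/* Returns true when the receiver was actually notified. */
+static bool apply_if_changed(struct sketch_cfg *cached,
+			     const struct sketch_cfg *next)
+{
+	bool changed = memcmp(cached, next, sizeof(*next)) != 0;
+
+	if (changed) {
+		/* ...notify power management here... */
+	}
+	*cached = *next;	/* always keep the cached copy in sync */
+	return changed;
+}
+
+int main(void)
+{
+	struct sketch_cfg cached = { 0 }, next = { 2, 600000 };
+
+	printf("%d\n", apply_if_changed(&cached, &next));	/* 1: changed */
+	printf("%d\n", apply_if_changed(&cached, &next));	/* 0: no change */
+	return 0;
+}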
+
+static void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ dce110_set_displaymarks(dc, context);
+
+ if (decrease_allowed || context->bw.dce.dispclk_khz > dc->current_state->bw.dce.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dce.dispclk_khz * 115 / 100);
+ dc->current_state->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+
+ pplib_apply_display_requirements(dc, context);
+}
+
+static void dce110_program_front_end_for_pipe(
+ struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct mem_input *mi = pipe_ctx->plane_res.mi;
+ struct pipe_ctx *old_pipe = NULL;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct xfm_grph_csc_adjustment adjust;
+ struct out_csc_color_matrix tbl_entry;
+ struct pipe_ctx *cur_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+ unsigned int i;
+
+ memset(&tbl_entry, 0, sizeof(tbl_entry));
+
+ if (dc->current_state)
+ old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+ dce_enable_fe_clock(dc->hwseq, pipe_ctx->pipe_idx, true);
+
+ set_default_colors(pipe_ctx);
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ tbl_entry.color_space =
+ pipe_ctx->stream->output_color_space;
+
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] =
+ pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment
+ (pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust);
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
+
+ program_scaler(dc, pipe_ctx);
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ if (dc->fbc_compressor && old_pipe->stream) {
+ if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+ else
+ enable_fbc(dc, dc->current_state);
+ }
+#endif
+
+ mi->funcs->mem_input_program_surface_config(
+ mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &plane_state->plane_size,
+ plane_state->rotation,
+ NULL,
+ false);
+ if (mi->funcs->set_blank)
+ mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible);
+
+ if (dc->config.gpu_vm_support)
+ mi->funcs->mem_input_program_pte_vm(
+ pipe_ctx->plane_res.mi,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation);
+
+ /* Moved programming gamma from dc to hwss */
+ if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) {
+ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+
+ dm_logger_write(dc->ctx->logger, LOG_SURFACE,
+ "Pipe:%d 0x%x: addr hi:0x%x, "
+ "addr low:0x%x, "
+ "src: %d, %d, %d,"
+ " %d; dst: %d, %d, %d, %d;"
+ "clip: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->plane_state,
+ pipe_ctx->plane_state->address.grph.addr.high_part,
+ pipe_ctx->plane_state->address.grph.addr.low_part,
+ pipe_ctx->plane_state->src_rect.x,
+ pipe_ctx->plane_state->src_rect.y,
+ pipe_ctx->plane_state->src_rect.width,
+ pipe_ctx->plane_state->src_rect.height,
+ pipe_ctx->plane_state->dst_rect.x,
+ pipe_ctx->plane_state->dst_rect.y,
+ pipe_ctx->plane_state->dst_rect.width,
+ pipe_ctx->plane_state->dst_rect.height,
+ pipe_ctx->plane_state->clip_rect.x,
+ pipe_ctx->plane_state->clip_rect.y,
+ pipe_ctx->plane_state->clip_rect.width,
+ pipe_ctx->plane_state->clip_rect.height);
+
+ dm_logger_write(dc->ctx->logger, LOG_SURFACE,
+ "Pipe %d: width, height, x, y\n"
+ "viewport:%d, %d, %d, %d\n"
+ "recout: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ pipe_ctx->plane_res.scl_data.recout.width,
+ pipe_ctx->plane_res.scl_data.recout.height,
+ pipe_ctx->plane_res.scl_data.recout.x,
+ pipe_ctx->plane_res.scl_data.recout.y);
+}
+
+static void dce110_apply_ctx_for_surface(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context)
+{
+ int i, be_idx;
+
+ if (num_planes == 0)
+ return;
+
+ be_idx = -1;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (stream == context->res_ctx.pipe_ctx[i].stream) {
+ be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
+ break;
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+		/* Need to allocate memory before programming the front end for Fiji */
+ if (pipe_ctx->plane_res.mi != NULL)
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(
+ pipe_ctx->plane_res.mi,
+ pipe_ctx->stream->timing.h_total,
+ pipe_ctx->stream->timing.v_total,
+ pipe_ctx->stream->timing.pix_clk_khz,
+ context->stream_count);
+
+ dce110_program_front_end_for_pipe(dc, pipe_ctx);
+ program_surface_visibility(dc, pipe_ctx);
+
+ }
+}
+
+static void dce110_power_down_fe(struct dc *dc, int fe_idx)
+{
+	/* Do not power down the front end while a stream is active on DCE */
+ if (dc->current_state->res_ctx.pipe_ctx[fe_idx].stream)
+ return;
+
+ dc->hwss.enable_display_power_gating(
+ dc, fe_idx, dc->ctx->dc_bios, PIPE_GATING_CONTROL_ENABLE);
+
+ dc->res_pool->transforms[fe_idx]->funcs->transform_reset(
+ dc->res_pool->transforms[fe_idx]);
+}
+
+static void dce110_wait_for_mpcc_disconnect(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx)
+{
+ /* do nothing*/
+}
+
+static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix)
+{
+ int i;
+ struct out_csc_color_matrix tbl_entry;
+
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ enum dc_color_space color_space =
+ pipe_ctx->stream->output_color_space;
+
+ //uint16_t matrix[12];
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = color_space;
+ //tbl_entry.regval = matrix;
+ pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry);
+ }
+}
+
+static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
+
+static void optimize_shared_resources(struct dc *dc) {}
+
+static const struct hw_sequencer_funcs dce110_funcs = {
+ .program_gamut_remap = program_gamut_remap,
+ .program_csc_matrix = program_csc_matrix,
+ .init_hw = init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dce110_apply_ctx_for_surface,
+ .set_plane_config = set_plane_config,
+ .update_plane_addr = update_plane_addr,
+ .update_pending_status = dce110_update_pending_status,
+ .set_input_transfer_func = dce110_set_input_transfer_func,
+ .set_output_transfer_func = dce110_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dce110_enable_timing_synchronization,
+ .update_info_frame = dce110_update_info_frame,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dce110_unblank_stream,
+ .enable_display_pipe_clock_gating = enable_display_pipe_clock_gating,
+ .enable_display_power_gating = dce110_enable_display_power_gating,
+ .power_down_front_end = dce110_power_down_fe,
+ .pipe_control_lock = dce_pipe_control_lock,
+ .set_bandwidth = dce110_set_bandwidth,
+ .set_drr = set_drr,
+ .get_position = get_position,
+ .set_static_screen_control = set_static_screen_control,
+ .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap,
+ .prog_pixclk_crtc_otg = dce110_prog_pixclk_crtc_otg,
+ .setup_stereo = NULL,
+ .set_avmute = dce110_set_avmute,
+ .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
+ .ready_shared_resources = ready_shared_resources,
+ .optimize_shared_resources = optimize_shared_resources,
+ .edp_backlight_control = hwss_edp_backlight_control,
+ .edp_power_control = hwss_edp_power_control,
+};
+
+void dce110_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dce110_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
new file mode 100644
index 000000000000..4d72bb99be93
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE110_H__
+#define __DC_HWSS_DCE110_H__
+
+#include "core_types.h"
+
+#define GAMMA_HW_POINTS_NUM 256
+struct dc;
+struct dc_state;
+struct dm_pp_display_configuration;
+
+void dce110_hw_sequencer_construct(struct dc *dc);
+
+enum dc_status dce110_apply_ctx_to_hw(
+ struct dc *dc,
+ struct dc_state *context);
+
+void dce110_set_display_clock(struct dc_state *context);
+
+void dce110_set_displaymarks(
+ const struct dc *dc,
+ struct dc_state *context);
+
+void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
+
+void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+
+void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+
+void dce110_update_info_frame(struct pipe_ctx *pipe_ctx);
+
+void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+void dce110_enable_accelerated_mode(struct dc *dc);
+
+void dce110_power_down(struct dc *dc);
+
+void dce110_update_pending_status(struct pipe_ctx *pipe_ctx);
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+
+void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+void hwss_edp_power_control(
+ struct link_encoder *enc,
+ bool power_up);
+
+void hwss_edp_backlight_control(
+ struct dc_link *link,
+ bool enable);
+
+#endif /* __DC_HWSS_DCE110_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
new file mode 100644
index 000000000000..a06c6024deb4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
@@ -0,0 +1,1052 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+/* TODO: this needs to be looked at, used by Stella's workaround*/
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+
+#include "include/logger_interface.h"
+#include "inc/dce_calcs.h"
+
+#include "dce/dce_mem_input.h"
+
+static void set_flip_control(
+ struct dce_mem_input *mem_input110,
+ bool immediate)
+{
+ uint32_t value = 0;
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_FLIP_CONTROL);
+
+ set_reg_field_value(value, 1,
+ UNP_FLIP_CONTROL,
+ GRPH_SURFACE_UPDATE_PENDING_MODE);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_FLIP_CONTROL,
+ value);
+}
+
+/* chroma part */
+static void program_pri_addr_c(
+ struct dce_mem_input *mem_input110,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ uint32_t value = 0;
+ uint32_t temp = 0;
+ /*high register MUST be programmed first*/
+ temp = address.high_part &
+UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ value);
+
+ temp = 0;
+ value = 0;
+ temp = address.low_part >>
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C__GRPH_PRIMARY_SURFACE_ADDRESS_C__SHIFT;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_C,
+ GRPH_PRIMARY_SURFACE_ADDRESS_C);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_C,
+ value);
+}
+
+/* luma part */
+static void program_pri_addr_l(
+ struct dce_mem_input *mem_input110,
+ PHYSICAL_ADDRESS_LOC address)
+{
+ uint32_t value = 0;
+ uint32_t temp = 0;
+
+ /*high register MUST be programmed first*/
+ temp = address.high_part &
+UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L__GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L_MASK;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L,
+ GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH_L,
+ value);
+
+ temp = 0;
+ value = 0;
+ temp = address.low_part >>
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L__GRPH_PRIMARY_SURFACE_ADDRESS_L__SHIFT;
+
+ set_reg_field_value(value, temp,
+ UNP_GRPH_PRIMARY_SURFACE_ADDRESS_L,
+ GRPH_PRIMARY_SURFACE_ADDRESS_L);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PRIMARY_SURFACE_ADDRESS_L,
+ value);
+}
+
+static void program_addr(
+ struct dce_mem_input *mem_input110,
+ const struct dc_plane_address *addr)
+{
+ switch (addr->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ program_pri_addr_l(
+ mem_input110,
+ addr->grph.addr);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ program_pri_addr_c(
+ mem_input110,
+ addr->video_progressive.chroma_addr);
+ program_pri_addr_l(
+ mem_input110,
+ addr->video_progressive.luma_addr);
+ break;
+ default:
+ /* not supported */
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+static void enable(struct dce_mem_input *mem_input110)
+{
+ uint32_t value = 0;
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_GRPH_ENABLE);
+ set_reg_field_value(value, 1, UNP_GRPH_ENABLE, GRPH_ENABLE);
+ dm_write_reg(mem_input110->base.ctx,
+ mmUNP_GRPH_ENABLE,
+ value);
+}
+
+static void program_tiling(
+ struct dce_mem_input *mem_input110,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format)
+{
+ uint32_t value = 0;
+
+ set_reg_field_value(value, info->gfx8.num_banks,
+ UNP_GRPH_CONTROL, GRPH_NUM_BANKS);
+
+ set_reg_field_value(value, info->gfx8.bank_width,
+ UNP_GRPH_CONTROL, GRPH_BANK_WIDTH_L);
+
+ set_reg_field_value(value, info->gfx8.bank_height,
+ UNP_GRPH_CONTROL, GRPH_BANK_HEIGHT_L);
+
+ set_reg_field_value(value, info->gfx8.tile_aspect,
+ UNP_GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT_L);
+
+ set_reg_field_value(value, info->gfx8.tile_split,
+ UNP_GRPH_CONTROL, GRPH_TILE_SPLIT_L);
+
+ set_reg_field_value(value, info->gfx8.tile_mode,
+ UNP_GRPH_CONTROL, GRPH_MICRO_TILE_MODE_L);
+
+ set_reg_field_value(value, info->gfx8.pipe_config,
+ UNP_GRPH_CONTROL, GRPH_PIPE_CONFIG);
+
+ set_reg_field_value(value, info->gfx8.array_mode,
+ UNP_GRPH_CONTROL, GRPH_ARRAY_MODE);
+
+ set_reg_field_value(value, 1,
+ UNP_GRPH_CONTROL, GRPH_COLOR_EXPANSION_MODE);
+
+ set_reg_field_value(value, 0,
+ UNP_GRPH_CONTROL, GRPH_Z);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL,
+ value);
+
+ value = 0;
+
+ set_reg_field_value(value, info->gfx8.bank_width_c,
+ UNP_GRPH_CONTROL_C, GRPH_BANK_WIDTH_C);
+
+ set_reg_field_value(value, info->gfx8.bank_height_c,
+ UNP_GRPH_CONTROL_C, GRPH_BANK_HEIGHT_C);
+
+ set_reg_field_value(value, info->gfx8.tile_aspect_c,
+ UNP_GRPH_CONTROL_C, GRPH_MACRO_TILE_ASPECT_C);
+
+ set_reg_field_value(value, info->gfx8.tile_split_c,
+ UNP_GRPH_CONTROL_C, GRPH_TILE_SPLIT_C);
+
+ set_reg_field_value(value, info->gfx8.tile_mode_c,
+ UNP_GRPH_CONTROL_C, GRPH_MICRO_TILE_MODE_C);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_C,
+ value);
+}
+
+static void program_size_and_rotation(
+ struct dce_mem_input *mem_input110,
+ enum dc_rotation_angle rotation,
+ const union plane_size *plane_size)
+{
+ uint32_t value = 0;
+ union plane_size local_size = *plane_size;
+
+ if (rotation == ROTATION_ANGLE_90 ||
+ rotation == ROTATION_ANGLE_270) {
+
+ uint32_t swap;
+ swap = local_size.video.luma_size.x;
+ local_size.video.luma_size.x =
+ local_size.video.luma_size.y;
+ local_size.video.luma_size.y = swap;
+
+ swap = local_size.video.luma_size.width;
+ local_size.video.luma_size.width =
+ local_size.video.luma_size.height;
+ local_size.video.luma_size.height = swap;
+
+ swap = local_size.video.chroma_size.x;
+ local_size.video.chroma_size.x =
+ local_size.video.chroma_size.y;
+ local_size.video.chroma_size.y = swap;
+
+ swap = local_size.video.chroma_size.width;
+ local_size.video.chroma_size.width =
+ local_size.video.chroma_size.height;
+ local_size.video.chroma_size.height = swap;
+ }
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.luma_pitch,
+ UNP_GRPH_PITCH_L, GRPH_PITCH_L);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PITCH_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.chroma_pitch,
+ UNP_GRPH_PITCH_C, GRPH_PITCH_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_PITCH_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_X_START_L, GRPH_X_START_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_START_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_X_START_C, GRPH_X_START_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_START_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_Y_START_L, GRPH_Y_START_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_START_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ UNP_GRPH_Y_START_C, GRPH_Y_START_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_START_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.luma_size.x +
+ local_size.video.luma_size.width,
+ UNP_GRPH_X_END_L, GRPH_X_END_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_END_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.chroma_size.x +
+ local_size.video.chroma_size.width,
+ UNP_GRPH_X_END_C, GRPH_X_END_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_X_END_C,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.luma_size.y +
+ local_size.video.luma_size.height,
+ UNP_GRPH_Y_END_L, GRPH_Y_END_L);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_END_L,
+ value);
+
+ value = 0;
+ set_reg_field_value(value, local_size.video.chroma_size.y +
+ local_size.video.chroma_size.height,
+ UNP_GRPH_Y_END_C, GRPH_Y_END_C);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_Y_END_C,
+ value);
+
+ value = 0;
+ switch (rotation) {
+ case ROTATION_ANGLE_90:
+ set_reg_field_value(value, 3,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ case ROTATION_ANGLE_180:
+ set_reg_field_value(value, 2,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ case ROTATION_ANGLE_270:
+ set_reg_field_value(value, 1,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ default:
+ set_reg_field_value(value, 0,
+ UNP_HW_ROTATION, ROTATION_ANGLE);
+ break;
+ }
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_HW_ROTATION,
+ value);
+}
+
+static void program_pixel_format(
+ struct dce_mem_input *mem_input110,
+ enum surface_pixel_format format)
+{
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ uint32_t value;
+ uint8_t grph_depth;
+ uint8_t grph_format;
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+ grph_depth = 0;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ grph_depth = 1;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ grph_depth = 2;
+ grph_format = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ grph_depth = 2;
+ grph_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ grph_depth = 3;
+ grph_format = 0;
+ break;
+ default:
+ grph_depth = 2;
+ grph_format = 0;
+ break;
+ }
+
+ set_reg_field_value(
+ value,
+ grph_depth,
+ UNP_GRPH_CONTROL,
+ GRPH_DEPTH);
+ set_reg_field_value(
+ value,
+ grph_format,
+ UNP_GRPH_CONTROL,
+ GRPH_FORMAT);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL,
+ value);
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP);
+
+ /* VIDEO FORMAT 0 */
+ set_reg_field_value(
+ value,
+ 0,
+ UNP_GRPH_CONTROL_EXP,
+ VIDEO_FORMAT);
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP,
+ value);
+
+ } else {
+		/* Video 422 and 420 need UNP_GRPH_CONTROL_EXP programmed */
+ uint32_t value;
+ uint8_t video_format;
+
+ value = dm_read_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ video_format = 2;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ video_format = 3;
+ break;
+ default:
+ video_format = 0;
+ break;
+ }
+
+ set_reg_field_value(
+ value,
+ video_format,
+ UNP_GRPH_CONTROL_EXP,
+ VIDEO_FORMAT);
+
+ dm_write_reg(
+ mem_input110->base.ctx,
+ mmUNP_GRPH_CONTROL_EXP,
+ value);
+ }
+}
+
+bool dce_mem_input_v_is_surface_pending(struct mem_input *mem_input)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+ uint32_t value;
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_GRPH_UPDATE);
+
+ if (get_reg_field_value(value, UNP_GRPH_UPDATE,
+ GRPH_SURFACE_UPDATE_PENDING))
+ return true;
+
+ mem_input->current_address = mem_input->request_address;
+ return false;
+}
+
+bool dce_mem_input_v_program_surface_flip_and_addr(
+ struct mem_input *mem_input,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+
+ set_flip_control(mem_input110, flip_immediate);
+ program_addr(mem_input110,
+ address);
+
+ mem_input->request_address = *address;
+
+ return true;
+}
+
+/* Scatter Gather param tables */
+static const unsigned int dvmm_Hw_Setting_2DTiling[4][9] = {
+ { 8, 64, 64, 8, 8, 1, 4, 0, 0},
+ { 16, 64, 32, 8, 16, 1, 8, 0, 0},
+ { 32, 32, 32, 16, 16, 1, 8, 0, 0},
+ { 64, 8, 32, 16, 16, 1, 8, 0, 0}, /* fake */
+};
+
+static const unsigned int dvmm_Hw_Setting_1DTiling[4][9] = {
+ { 8, 512, 8, 1, 0, 1, 0, 0, 0}, /* 0 for invalid */
+ { 16, 256, 8, 2, 0, 1, 0, 0, 0},
+ { 32, 128, 8, 4, 0, 1, 0, 0, 0},
+ { 64, 64, 8, 4, 0, 1, 0, 0, 0}, /* fake */
+};
+
+static const unsigned int dvmm_Hw_Setting_Linear[4][9] = {
+ { 8, 4096, 1, 8, 0, 1, 0, 0, 0},
+ { 16, 2048, 1, 8, 0, 1, 0, 0, 0},
+ { 32, 1024, 1, 8, 0, 1, 0, 0, 0},
+ { 64, 512, 1, 8, 0, 1, 0, 0, 0}, /* new for 64bpp from HW */
+};
+
+/* Helper to get table entry from surface info */
+static const unsigned int *get_dvmm_hw_setting(
+ union dc_tiling_info *tiling_info,
+ enum surface_pixel_format format,
+ bool chroma)
+{
+ enum bits_per_pixel {
+ bpp_8 = 0,
+ bpp_16,
+ bpp_32,
+ bpp_64
+ } bpp;
+
+ if (format >= SURFACE_PIXEL_FORMAT_INVALID)
+ bpp = bpp_32;
+ else if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ bpp = chroma ? bpp_16 : bpp_8;
+ else
+ bpp = bpp_8;
+
+ switch (tiling_info->gfx8.array_mode) {
+ case DC_ARRAY_1D_TILED_THIN1:
+ case DC_ARRAY_1D_TILED_THICK:
+ case DC_ARRAY_PRT_TILED_THIN1:
+ return dvmm_Hw_Setting_1DTiling[bpp];
+ case DC_ARRAY_2D_TILED_THIN1:
+ case DC_ARRAY_2D_TILED_THICK:
+ case DC_ARRAY_2D_TILED_X_THICK:
+ case DC_ARRAY_PRT_2D_TILED_THIN1:
+ case DC_ARRAY_PRT_2D_TILED_THICK:
+ return dvmm_Hw_Setting_2DTiling[bpp];
+ case DC_ARRAY_LINEAR_GENERAL:
+ case DC_ARRAY_LINEAR_ALLIGNED:
+ return dvmm_Hw_Setting_Linear[bpp];
+ default:
+ return dvmm_Hw_Setting_2DTiling[bpp];
+ }
+}
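+
+/*
+ * Illustrative, standalone sketch (not part of this patch): the table
+ * lookup above -- static per-bpp parameter rows are grouped by tiling
+ * family, and the caller receives a pointer to one row.  The enum names,
+ * pick_row() and the row layout are invented for the example; the numbers
+ * simply echo the first three columns of the driver tables above.
+ */
+#include <stdio.h>
+
+enum sketch_bpp { BPP_8, BPP_16, BPP_32, BPP_64, BPP_COUNT };
+enum sketch_tiling { TILING_LINEAR, TILING_1D, TILING_2D, TILING_COUNT };
+
+static const unsigned int sketch_rows[TILING_COUNT][BPP_COUNT][3] = {
+	[TILING_LINEAR] = { { 8, 4096, 1 }, { 16, 2048, 1 },
+			    { 32, 1024, 1 }, { 64, 512, 1 } },
+	[TILING_1D]     = { { 8, 512, 8 },  { 16, 256, 8 },
+			    { 32, 128, 8 },  { 64, 64, 8 } },
+	[TILING_2D]     = { { 8, 64, 64 },  { 16, 64, 32 },
+			    { 32, 32, 32 },  { 64, 8, 32 } },
+};
+
+static const unsigned int *pick_row(enum sketch_tiling tiling,
+				    enum sketch_bpp bpp)
+{
+	return sketch_rows[tiling][bpp];
+}
+
+int main(void)
+{
+	const unsigned int *row = pick_row(TILING_2D, BPP_32);
+
+	printf("%u %u %u\n", row[0], row[1], row[2]);	/* 32 32 32 */
+	return 0;
+}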
+
+void dce_mem_input_v_program_pte_vm(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+ const unsigned int *pte = get_dvmm_hw_setting(tiling_info, format, false);
+ const unsigned int *pte_chroma = get_dvmm_hw_setting(tiling_info, format, true);
+
+ unsigned int page_width = 0;
+ unsigned int page_height = 0;
+ unsigned int page_width_chroma = 0;
+ unsigned int page_height_chroma = 0;
+ unsigned int temp_page_width = pte[1];
+ unsigned int temp_page_height = pte[2];
+ unsigned int min_pte_before_flip = 0;
+ unsigned int min_pte_before_flip_chroma = 0;
+ uint32_t value = 0;
+
+ while ((temp_page_width >>= 1) != 0)
+ page_width++;
+ while ((temp_page_height >>= 1) != 0)
+ page_height++;
+
+ temp_page_width = pte_chroma[1];
+ temp_page_height = pte_chroma[2];
+ while ((temp_page_width >>= 1) != 0)
+ page_width_chroma++;
+ while ((temp_page_height >>= 1) != 0)
+ page_height_chroma++;
+
+ switch (rotation) {
+ case ROTATION_ANGLE_90:
+ case ROTATION_ANGLE_270:
+ min_pte_before_flip = pte[4];
+ min_pte_before_flip_chroma = pte_chroma[4];
+ break;
+ default:
+ min_pte_before_flip = pte[3];
+ min_pte_before_flip_chroma = pte_chroma[3];
+ break;
+ }
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT);
+ /* TODO: un-hardcode requestlimit */
+ set_reg_field_value(value, 0xff, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_L);
+ set_reg_field_value(value, 0xff, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT, UNP_PIPE_OUTSTANDING_REQUEST_LIMIT_C);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_PIPE_OUTSTANDING_REQUEST_LIMIT, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL);
+ set_reg_field_value(value, page_width, UNP_DVMM_PTE_CONTROL, DVMM_PAGE_WIDTH);
+ set_reg_field_value(value, page_height, UNP_DVMM_PTE_CONTROL, DVMM_PAGE_HEIGHT);
+ set_reg_field_value(value, min_pte_before_flip, UNP_DVMM_PTE_CONTROL, DVMM_MIN_PTE_BEFORE_FLIP);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL);
+ set_reg_field_value(value, pte[5], UNP_DVMM_PTE_ARB_CONTROL, DVMM_PTE_REQ_PER_CHUNK);
+ set_reg_field_value(value, 0xff, UNP_DVMM_PTE_ARB_CONTROL, DVMM_MAX_PTE_REQ_OUTSTANDING);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL_C);
+ set_reg_field_value(value, page_width_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_PAGE_WIDTH_C);
+ set_reg_field_value(value, page_height_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_PAGE_HEIGHT_C);
+ set_reg_field_value(value, min_pte_before_flip_chroma, UNP_DVMM_PTE_CONTROL_C, DVMM_MIN_PTE_BEFORE_FLIP_C);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_CONTROL_C, value);
+
+ value = dm_read_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C);
+ set_reg_field_value(value, pte_chroma[5], UNP_DVMM_PTE_ARB_CONTROL_C, DVMM_PTE_REQ_PER_CHUNK_C);
+ set_reg_field_value(value, 0xff, UNP_DVMM_PTE_ARB_CONTROL_C, DVMM_MAX_PTE_REQ_OUTSTANDING_C);
+ dm_write_reg(mem_input110->base.ctx, mmUNP_DVMM_PTE_ARB_CONTROL_C, value);
+}
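+
+/*
+ * Illustrative, standalone sketch (not part of this patch): the shift loops
+ * in dce_mem_input_v_program_pte_vm() above compute an integer log2 of the
+ * page dimensions.  Shifting right until the value reaches zero, counting
+ * the shifts, yields floor(log2(n)).  ilog2_by_shift() is invented for the
+ * example (the kernel itself provides ilog2()).
+ */
+#include <stdio.h>
+
+static unsigned int ilog2_by_shift(unsigned int n)
+{
+	unsigned int bits = 0;
+
+	while ((n >>= 1) != 0)
+		bits++;
+	return bits;
+}
+
+int main(void)
+{
+	/* e.g. pte[1] == 64 in the 8bpp 2D-tiling row above yields 6. */
+	printf("%u\n", ilog2_by_shift(64));
+	return 0;
+}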
+
+void dce_mem_input_v_program_surface_config(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+	bool horizontal_mirror)
+{
+ struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input);
+
+ enable(mem_input110);
+ program_tiling(mem_input110, tiling_info, format);
+ program_size_and_rotation(mem_input110, rotation, plane_size);
+ program_pixel_format(mem_input110, format);
+}
+
+static void program_urgency_watermark(
+ const struct dc_context *ctx,
+ const uint32_t urgency_addr,
+ const uint32_t wm_addr,
+ struct dce_watermarks marks_low,
+ uint32_t total_dest_line_time_ns)
+{
+ /* register value */
+ uint32_t urgency_cntl = 0;
+ uint32_t wm_mask_cntl = 0;
+
+ /*Write mask to enable reading/writing of watermark set A*/
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 1,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ urgency_cntl = dm_read_reg(ctx, urgency_addr);
+
+ set_reg_field_value(
+ urgency_cntl,
+ marks_low.a_mark,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_LOW_WATERMARK);
+
+ set_reg_field_value(
+ urgency_cntl,
+ total_dest_line_time_ns,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_HIGH_WATERMARK);
+ dm_write_reg(ctx, urgency_addr, urgency_cntl);
+
+ /*Write mask to enable reading/writing of watermark set B*/
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 2,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ URGENCY_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ urgency_cntl = dm_read_reg(ctx, urgency_addr);
+
+ set_reg_field_value(urgency_cntl,
+ marks_low.b_mark,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_LOW_WATERMARK);
+
+ set_reg_field_value(urgency_cntl,
+ total_dest_line_time_ns,
+ DPGV0_PIPE_URGENCY_CONTROL,
+ URGENCY_HIGH_WATERMARK);
+
+ dm_write_reg(ctx, urgency_addr, urgency_cntl);
+}
+
+static void program_urgency_watermark_l(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks_low,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark(
+ ctx,
+ mmDPGV0_PIPE_URGENCY_CONTROL,
+ mmDPGV0_WATERMARK_MASK_CONTROL,
+ marks_low,
+ total_dest_line_time_ns);
+}
+
+static void program_urgency_watermark_c(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks_low,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark(
+ ctx,
+ mmDPGV1_PIPE_URGENCY_CONTROL,
+ mmDPGV1_WATERMARK_MASK_CONTROL,
+ marks_low,
+ total_dest_line_time_ns);
+}
+
+static void program_stutter_watermark(
+ const struct dc_context *ctx,
+ const uint32_t stutter_addr,
+ const uint32_t wm_addr,
+ struct dce_watermarks marks)
+{
+ /* register value */
+ uint32_t stutter_cntl = 0;
+ uint32_t wm_mask_cntl = 0;
+
+ /*Write mask to enable reading/writing of watermark set A*/
+
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 1,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ stutter_cntl = dm_read_reg(ctx, stutter_addr);
+
+ if (ctx->dc->debug.disable_stutter) {
+ set_reg_field_value(stutter_cntl,
+ 0,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE);
+ } else {
+ set_reg_field_value(stutter_cntl,
+ 1,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_ENABLE);
+ }
+
+ set_reg_field_value(stutter_cntl,
+ 1,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_IGNORE_FBC);
+
+ /*Write watermark set A*/
+ set_reg_field_value(stutter_cntl,
+ marks.a_mark,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK);
+ dm_write_reg(ctx, stutter_addr, stutter_cntl);
+
+ /*Write mask to enable reading/writing of watermark set B*/
+ wm_mask_cntl = dm_read_reg(ctx, wm_addr);
+ set_reg_field_value(wm_mask_cntl,
+ 2,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_addr, wm_mask_cntl);
+
+ stutter_cntl = dm_read_reg(ctx, stutter_addr);
+ /*Write watermark set B*/
+ set_reg_field_value(stutter_cntl,
+ marks.b_mark,
+ DPGV0_PIPE_STUTTER_CONTROL,
+ STUTTER_EXIT_SELF_REFRESH_WATERMARK);
+ dm_write_reg(ctx, stutter_addr, stutter_cntl);
+}
+
+static void program_stutter_watermark_l(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_stutter_watermark(ctx,
+ mmDPGV0_PIPE_STUTTER_CONTROL,
+ mmDPGV0_WATERMARK_MASK_CONTROL,
+ marks);
+}
+
+static void program_stutter_watermark_c(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_stutter_watermark(ctx,
+ mmDPGV1_PIPE_STUTTER_CONTROL,
+ mmDPGV1_WATERMARK_MASK_CONTROL,
+ marks);
+}
+
+static void program_nbp_watermark(
+ const struct dc_context *ctx,
+ const uint32_t wm_mask_ctrl_addr,
+ const uint32_t nbp_pstate_ctrl_addr,
+ struct dce_watermarks marks)
+{
+ uint32_t value;
+
+ /* Write mask to enable reading/writing of watermark set A */
+
+ value = dm_read_reg(ctx, wm_mask_ctrl_addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_mask_ctrl_addr, value);
+
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+
+ /* Write watermark set A */
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+ set_reg_field_value(
+ value,
+ marks.a_mark,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+
+ /* Write mask to enable reading/writing of watermark set B */
+ value = dm_read_reg(ctx, wm_mask_ctrl_addr);
+ set_reg_field_value(
+ value,
+ 2,
+ DPGV0_WATERMARK_MASK_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK_MASK);
+ dm_write_reg(ctx, wm_mask_ctrl_addr, value);
+
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_ENABLE);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_URGENT_DURING_REQUEST);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+
+ /* Write watermark set B */
+ value = dm_read_reg(ctx, nbp_pstate_ctrl_addr);
+ set_reg_field_value(
+ value,
+ marks.b_mark,
+ DPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ NB_PSTATE_CHANGE_WATERMARK);
+ dm_write_reg(ctx, nbp_pstate_ctrl_addr, value);
+}
+
+static void program_nbp_watermark_l(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_nbp_watermark(ctx,
+ mmDPGV0_WATERMARK_MASK_CONTROL,
+ mmDPGV0_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ marks);
+}
+
+static void program_nbp_watermark_c(
+ const struct dc_context *ctx,
+ struct dce_watermarks marks)
+{
+ program_nbp_watermark(ctx,
+ mmDPGV1_WATERMARK_MASK_CONTROL,
+ mmDPGV1_PIPE_NB_PSTATE_CHANGE_CONTROL,
+ marks);
+}
+
+void dce_mem_input_v_program_display_marks(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark_l(
+ mem_input->ctx,
+ urgent,
+ total_dest_line_time_ns);
+
+ program_nbp_watermark_l(
+ mem_input->ctx,
+ nbp);
+
+ program_stutter_watermark_l(
+ mem_input->ctx,
+ stutter);
+
+}
+
+void dce_mem_input_program_chroma_display_marks(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns)
+{
+ program_urgency_watermark_c(
+ mem_input->ctx,
+ urgent,
+ total_dest_line_time_ns);
+
+ program_nbp_watermark_c(
+ mem_input->ctx,
+ nbp);
+
+ program_stutter_watermark_c(
+ mem_input->ctx,
+ stutter);
+}
+
+void dce110_allocate_mem_input_v(
+ struct mem_input *mi,
+ uint32_t h_total,/* for current stream */
+ uint32_t v_total,/* for current stream */
+ uint32_t pix_clk_khz,/* for current stream */
+ uint32_t total_stream_num)
+{
+ uint32_t addr;
+ uint32_t value;
+ uint32_t pix_dur;
+ if (pix_clk_khz != 0) {
+ addr = mmDPGV0_PIPE_ARBITRATION_CONTROL1;
+ value = dm_read_reg(mi->ctx, addr);
+ pix_dur = 1000000000ULL / pix_clk_khz;
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPGV0_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+ dm_write_reg(mi->ctx, addr, value);
+
+ addr = mmDPGV1_PIPE_ARBITRATION_CONTROL1;
+ value = dm_read_reg(mi->ctx, addr);
+ pix_dur = 1000000000ULL / pix_clk_khz;
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPGV1_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+ dm_write_reg(mi->ctx, addr, value);
+
+ addr = mmDPGV0_PIPE_ARBITRATION_CONTROL2;
+ value = 0x4000800;
+ dm_write_reg(mi->ctx, addr, value);
+
+ addr = mmDPGV1_PIPE_ARBITRATION_CONTROL2;
+ value = 0x4000800;
+ dm_write_reg(mi->ctx, addr, value);
+ }
+}
+
+void dce110_free_mem_input_v(
+ struct mem_input *mi,
+ uint32_t total_stream_num)
+{
+}
+
+static struct mem_input_funcs dce110_mem_input_v_funcs = {
+ .mem_input_program_display_marks =
+ dce_mem_input_v_program_display_marks,
+ .mem_input_program_chroma_display_marks =
+ dce_mem_input_program_chroma_display_marks,
+ .allocate_mem_input = dce110_allocate_mem_input_v,
+ .free_mem_input = dce110_free_mem_input_v,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mem_input_v_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm =
+ dce_mem_input_v_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mem_input_v_program_surface_config,
+ .mem_input_is_flip_pending =
+ dce_mem_input_v_is_surface_pending
+};
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dce110_mem_input_v_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx)
+{
+ dce_mi->base.funcs = &dce110_mem_input_v_funcs;
+ dce_mi->base.ctx = ctx;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h
new file mode 100644
index 000000000000..f01d4a607fea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h
@@ -0,0 +1,35 @@
+/* Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MEM_INPUT_V_DCE110_H__
+#define __DC_MEM_INPUT_V_DCE110_H__
+
+#include "mem_input.h"
+#include "dce/dce_mem_input.h"
+
+void dce110_mem_input_v_construct(
+ struct dce_mem_input *dce_mi,
+ struct dc_context *ctx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
new file mode 100644
index 000000000000..feb397b5c1a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce110_transform_v.h"
+#include "basics/conversion.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+
+enum {
+ OUTPUT_CSC_MATRIX_SIZE = 12
+};
+
+/* contrast: 0 - 2.0, default 1.0 */
+#define UNDERLAY_CONTRAST_DEFAULT 100
+#define UNDERLAY_CONTRAST_MAX 200
+#define UNDERLAY_CONTRAST_MIN 0
+#define UNDERLAY_CONTRAST_STEP 1
+#define UNDERLAY_CONTRAST_DIVIDER 100
+
+/* Saturation: 0 - 2.0; default 1.0 */
+#define UNDERLAY_SATURATION_DEFAULT 100 /*1.00*/
+#define UNDERLAY_SATURATION_MIN 0
+#define UNDERLAY_SATURATION_MAX 200 /* 2.00 */
+#define UNDERLAY_SATURATION_STEP 1 /* 0.01 */
+/* actual max underlay saturation
+ * value = UNDERLAY_SATURATION_MAX / UNDERLAY_SATURATION_DIVIDER
+ */
+
+/* Hue */
+#define UNDERLAY_HUE_DEFAULT 0
+#define UNDERLAY_HUE_MIN -300
+#define UNDERLAY_HUE_MAX 300
+#define UNDERLAY_HUE_STEP 5
+#define UNDERLAY_HUE_DIVIDER 10 /* HW range: -30 ~ +30 */
+#define UNDERLAY_SATURATION_DIVIDER 100
+
+/* Brightness: in DAL usually -0.25 ~ 0.25.
+ * In MMD it is -100 to +100 in the 16-235 range, which when scaled to full
+ * range is ~-116 to +116, i.e. about 0.4566 when normalized.
+ * With a divider of 100 this becomes 46, but another divider may give better
+ * precision. The ideal one is 100/219 ((100/255)*(255/219)),
+ * i.e. min/max = +-100, divider = 219.
+ * Default 0.0.
+ */
+#define UNDERLAY_BRIGHTNESS_DEFAULT 0
+#define UNDERLAY_BRIGHTNESS_MIN -46 /* ~116/255 */
+#define UNDERLAY_BRIGHTNESS_MAX 46
+#define UNDERLAY_BRIGHTNESS_STEP 1 /* .01 */
+#define UNDERLAY_BRIGHTNESS_DIVIDER 100
+
+static const struct out_csc_color_matrix global_color_matrix[] = {
+{ COLOR_SPACE_SRGB,
+ { 0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+{ COLOR_SPACE_SRGB_LIMITED,
+ { 0x1B60, 0, 0, 0x200, 0, 0x1B60, 0, 0x200, 0, 0, 0x1B60, 0x200} },
+{ COLOR_SPACE_YCBCR601,
+ { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x82F, 0x1012, 0x31F, 0x200, 0xFB47,
+ 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x5D2, 0x1394, 0x1FA,
+ 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
+/* TODO: correct values below */
+{ COLOR_SPACE_YCBCR601_LIMITED, { 0xE00, 0xF447, 0xFDB9, 0x1000, 0x991,
+ 0x12C9, 0x3A6, 0x200, 0xFB47, 0xF6B9, 0xE00, 0x1000} },
+{ COLOR_SPACE_YCBCR709_LIMITED, { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
+ 0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} }
+};
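+
+/* The coefficients above are in the S2.13 fixed-point format expected by the
+ * OUTPUT_CSC_* registers, so 0x2000 (1 << 13) represents 1.0.
+ */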
+
+enum csc_color_mode {
+ /* 00 - BITS2:0 Bypass */
+ CSC_COLOR_MODE_GRAPHICS_BYPASS,
+ /* 01 - hard coded coefficient TV RGB */
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED,
+ /* 04 - programmable OUTPUT CSC coefficient */
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC,
+};
+
+enum grph_color_adjust_option {
+ GRPH_COLOR_MATRIX_HW_DEFAULT = 1,
+ GRPH_COLOR_MATRIX_SW
+};
+
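+/* Program the matrix into whichever output CSC coefficient set (A or B) is
+ * not currently selected, then switch OUTPUT_CSC_MODE (4 = set A, 5 = set B)
+ * so the new coefficients take effect together.
+ */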
+static void program_color_matrix_v(
+ struct dce_transform *xfm_dce,
+ const struct out_csc_color_matrix *tbl_entry,
+ enum grph_color_adjust_option options)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t cntl_value = dm_read_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL);
+ bool use_set_a = (get_reg_field_value(cntl_value,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE) != 4);
+
+ set_reg_field_value(
+ cntl_value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+
+ if (use_set_a) {
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C11_C12_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[0],
+ OUTPUT_CSC_C11_C12_A,
+ OUTPUT_CSC_C11_A);
+
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[1],
+ OUTPUT_CSC_C11_C12_A,
+ OUTPUT_CSC_C12_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C13_C14_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[2],
+ OUTPUT_CSC_C13_C14_A,
+ OUTPUT_CSC_C13_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[3],
+ OUTPUT_CSC_C13_C14_A,
+ OUTPUT_CSC_C14_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C21_C22_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[4],
+ OUTPUT_CSC_C21_C22_A,
+ OUTPUT_CSC_C21_A);
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[5],
+ OUTPUT_CSC_C21_C22_A,
+ OUTPUT_CSC_C22_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C23_C24_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[6],
+ OUTPUT_CSC_C23_C24_A,
+ OUTPUT_CSC_C23_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[7],
+ OUTPUT_CSC_C23_C24_A,
+ OUTPUT_CSC_C24_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C31_C32_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[8],
+ OUTPUT_CSC_C31_C32_A,
+ OUTPUT_CSC_C31_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[9],
+ OUTPUT_CSC_C31_C32_A,
+ OUTPUT_CSC_C32_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C33_C34_A;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[10],
+ OUTPUT_CSC_C33_C34_A,
+ OUTPUT_CSC_C33_A);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[11],
+ OUTPUT_CSC_C33_C34_A,
+ OUTPUT_CSC_C34_A);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ set_reg_field_value(
+ cntl_value,
+ 4,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ } else {
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C11_C12_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[0],
+ OUTPUT_CSC_C11_C12_B,
+ OUTPUT_CSC_C11_B);
+
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[1],
+ OUTPUT_CSC_C11_C12_B,
+ OUTPUT_CSC_C12_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C13_C14_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[2],
+ OUTPUT_CSC_C13_C14_B,
+ OUTPUT_CSC_C13_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[3],
+ OUTPUT_CSC_C13_C14_B,
+ OUTPUT_CSC_C14_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C21_C22_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[4],
+ OUTPUT_CSC_C21_C22_B,
+ OUTPUT_CSC_C21_B);
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[5],
+ OUTPUT_CSC_C21_C22_B,
+ OUTPUT_CSC_C22_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C23_C24_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[6],
+ OUTPUT_CSC_C23_C24_B,
+ OUTPUT_CSC_C23_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[7],
+ OUTPUT_CSC_C23_C24_B,
+ OUTPUT_CSC_C24_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C31_C32_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[8],
+ OUTPUT_CSC_C31_C32_B,
+ OUTPUT_CSC_C31_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[9],
+ OUTPUT_CSC_C31_C32_B,
+ OUTPUT_CSC_C32_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ {
+ uint32_t value = 0;
+ uint32_t addr = mmOUTPUT_CSC_C33_C34_B;
+ /* fixed S2.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[10],
+ OUTPUT_CSC_C33_C34_B,
+ OUTPUT_CSC_C33_B);
+ /* fixed S0.13 format */
+ set_reg_field_value(
+ value,
+ tbl_entry->regval[11],
+ OUTPUT_CSC_C33_C34_B,
+ OUTPUT_CSC_C34_B);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ set_reg_field_value(
+ cntl_value,
+ 5,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ }
+
+ dm_write_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL, cntl_value);
+}
+
+static bool configure_graphics_mode_v(
+ struct dce_transform *xfm_dce,
+ enum csc_color_mode config,
+ enum graphics_csc_adjust_type csc_adjust_type,
+ enum dc_color_space color_space)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t addr = mmCOL_MAN_OUTPUT_CSC_CONTROL;
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+
+ if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_SW) {
+ if (config == CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC)
+ return true;
+
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* bypass */
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* not supported for underlay on CZ */
+ return false;
+
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ set_reg_field_value(
+ value,
+ 2,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ set_reg_field_value(
+ value,
+ 3,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ default:
+ return false;
+ }
+
+ } else if (csc_adjust_type == GRAPHICS_CSC_ADJUST_TYPE_HW) {
+ switch (color_space) {
+ case COLOR_SPACE_SRGB:
+ /* bypass */
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ /* not supported for underlay on CZ */
+ return false;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ /* YCbCr601 */
+ set_reg_field_value(
+ value,
+ 2,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ /* YCbCr709 */
+ set_reg_field_value(
+ value,
+ 3,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+ break;
+ default:
+ return false;
+ }
+
+ } else
+ /* bypass */
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_OUTPUT_CSC_CONTROL,
+ OUTPUT_CSC_MODE);
+
+ addr = mmCOL_MAN_OUTPUT_CSC_CONTROL;
+ dm_write_reg(ctx, addr, value);
+
+ return true;
+}
+
+/* TODO: color depth is not correct when this is called */
+static void set_Denormalization(struct transform *xfm,
+ enum dc_color_depth color_depth)
+{
+ uint32_t value = dm_read_reg(xfm->ctx, mmDENORM_CLAMP_CONTROL);
+
+ switch (color_depth) {
+ case COLOR_DEPTH_888:
+ /* 255/256 for 8 bit output color depth */
+ set_reg_field_value(
+ value,
+ 1,
+ DENORM_CLAMP_CONTROL,
+ DENORM_MODE);
+ break;
+ case COLOR_DEPTH_101010:
+ /* 1023/1024 for 10 bit output color depth */
+ set_reg_field_value(
+ value,
+ 2,
+ DENORM_CLAMP_CONTROL,
+ DENORM_MODE);
+ break;
+ case COLOR_DEPTH_121212:
+ /* 4095/4096 for 12 bit output color depth */
+ set_reg_field_value(
+ value,
+ 3,
+ DENORM_CLAMP_CONTROL,
+ DENORM_MODE);
+ break;
+ default:
+ /* not valid case */
+ break;
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ DENORM_CLAMP_CONTROL,
+ DENORM_10BIT_OUT);
+
+ dm_write_reg(xfm->ctx, mmDENORM_CLAMP_CONTROL, value);
+}
+
+struct input_csc_matrix {
+ enum dc_color_space color_space;
+ uint32_t regval[12];
+};
+
+static const struct input_csc_matrix input_csc_matrix[] = {
+ {COLOR_SPACE_SRGB,
+/*1_1 1_2 1_3 1_4 2_1 2_2 2_3 2_4 3_1 3_2 3_3 3_4 */
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_SRGB_LIMITED,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_YCBCR601,
+ {0x2cdd, 0x2000, 0x0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0x0, 0x2000, 0x38b4, 0xe3a6} },
+ {COLOR_SPACE_YCBCR601_LIMITED,
+ {0x3353, 0x2568, 0x0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0x0, 0x2568, 0x40de, 0xdd3a} },
+ {COLOR_SPACE_YCBCR709,
+ {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+ 0x2000, 0x3b61, 0xe24f} },
+ {COLOR_SPACE_YCBCR709_LIMITED,
+ {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+ 0x2568, 0x43ee, 0xdbb2} }
+};
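+
+/* Input CSC coefficients use the same S2.13 format: e.g. the first
+ * COLOR_SPACE_YCBCR601 entry 0x2cdd is 11485/8192 ~= 1.402, the familiar
+ * BT.601 coefficient.
+ */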
+
+static void program_input_csc(
+ struct transform *xfm, enum dc_color_space color_space)
+{
+ int arr_size = ARRAY_SIZE(input_csc_matrix);
+ struct dc_context *ctx = xfm->ctx;
+ const uint32_t *regval = NULL;
+ bool use_set_a;
+ uint32_t value;
+ int i;
+
+ for (i = 0; i < arr_size; i++)
+ if (input_csc_matrix[i].color_space == color_space) {
+ regval = input_csc_matrix[i].regval;
+ break;
+ }
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /*
+ * 1 == set A, the logic is 'if currently we're not using set A,
+ * then use set A, otherwise use set B'
+ */
+ value = dm_read_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL);
+ use_set_a = get_reg_field_value(
+ value, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_MODE) != 1;
+
+ if (use_set_a) {
+ /* fixed S2.13 format */
+ value = 0;
+ set_reg_field_value(
+ value, regval[0], INPUT_CSC_C11_C12_A, INPUT_CSC_C11_A);
+ set_reg_field_value(
+ value, regval[1], INPUT_CSC_C11_C12_A, INPUT_CSC_C12_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C11_C12_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[2], INPUT_CSC_C13_C14_A, INPUT_CSC_C13_A);
+ set_reg_field_value(
+ value, regval[3], INPUT_CSC_C13_C14_A, INPUT_CSC_C14_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C13_C14_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[4], INPUT_CSC_C21_C22_A, INPUT_CSC_C21_A);
+ set_reg_field_value(
+ value, regval[5], INPUT_CSC_C21_C22_A, INPUT_CSC_C22_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C21_C22_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[6], INPUT_CSC_C23_C24_A, INPUT_CSC_C23_A);
+ set_reg_field_value(
+ value, regval[7], INPUT_CSC_C23_C24_A, INPUT_CSC_C24_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C23_C24_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[8], INPUT_CSC_C31_C32_A, INPUT_CSC_C31_A);
+ set_reg_field_value(
+ value, regval[9], INPUT_CSC_C31_C32_A, INPUT_CSC_C32_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C31_C32_A, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[10], INPUT_CSC_C33_C34_A, INPUT_CSC_C33_A);
+ set_reg_field_value(
+ value, regval[11], INPUT_CSC_C33_C34_A, INPUT_CSC_C34_A);
+ dm_write_reg(ctx, mmINPUT_CSC_C33_C34_A, value);
+ } else {
+ /* fixed S2.13 format */
+ value = 0;
+ set_reg_field_value(
+ value, regval[0], INPUT_CSC_C11_C12_B, INPUT_CSC_C11_B);
+ set_reg_field_value(
+ value, regval[1], INPUT_CSC_C11_C12_B, INPUT_CSC_C12_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C11_C12_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[2], INPUT_CSC_C13_C14_B, INPUT_CSC_C13_B);
+ set_reg_field_value(
+ value, regval[3], INPUT_CSC_C13_C14_B, INPUT_CSC_C14_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C13_C14_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[4], INPUT_CSC_C21_C22_B, INPUT_CSC_C21_B);
+ set_reg_field_value(
+ value, regval[5], INPUT_CSC_C21_C22_B, INPUT_CSC_C22_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C21_C22_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[6], INPUT_CSC_C23_C24_B, INPUT_CSC_C23_B);
+ set_reg_field_value(
+ value, regval[7], INPUT_CSC_C23_C24_B, INPUT_CSC_C24_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C23_C24_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[8], INPUT_CSC_C31_C32_B, INPUT_CSC_C31_B);
+ set_reg_field_value(
+ value, regval[9], INPUT_CSC_C31_C32_B, INPUT_CSC_C32_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C31_C32_B, value);
+
+ value = 0;
+ set_reg_field_value(
+ value, regval[10], INPUT_CSC_C33_C34_B, INPUT_CSC_C33_B);
+ set_reg_field_value(
+ value, regval[11], INPUT_CSC_C33_C34_B, INPUT_CSC_C34_B);
+ dm_write_reg(ctx, mmINPUT_CSC_C33_C34_B, value);
+ }
+
+ /* KK: leave INPUT_CSC_CONVERSION_MODE at default */
+ value = 0;
+ /*
+ * Select the 8.4 input type instead of the default 12.0. Per the HW
+ * team, this format depends on the UNP surface format: for 8-bit we
+ * should select 8.4 (4 bits truncated), for 10-bit it should be 10.2.
+ * Carrizo only supports 8-bit surfaces on the underlay pipe, so we can
+ * always keep this at 8.4 (input_type=2). If later ASICs start
+ * supporting 10+ bits we will have a problem: surface programming
+ * (including UNP_GRPH*) is done in DalISR after this, so we would
+ * either have to pass the surface format here or move this logic into
+ * the ISR.
+ */
+
+ set_reg_field_value(
+ value, 2, COL_MAN_INPUT_CSC_CONTROL, INPUT_CSC_INPUT_TYPE);
+ set_reg_field_value(
+ value,
+ use_set_a ? 1 : 2,
+ COL_MAN_INPUT_CSC_CONTROL,
+ INPUT_CSC_MODE);
+
+ dm_write_reg(ctx, mmCOL_MAN_INPUT_CSC_CONTROL, value);
+}
+
+void dce110_opp_v_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_PREDEFINED;
+
+ if (default_adjust->force_hw_default == false) {
+ const struct out_csc_color_matrix *elm;
+ /* currently this parameter is not in use */
+ enum grph_color_adjust_option option =
+ GRPH_COLOR_MATRIX_HW_DEFAULT;
+ uint32_t i;
+ /*
+ * HW default false: we program the locally defined matrix.
+ * HW default true: we use the predefined HW matrix and do not
+ * need to program one.
+ * The OEM requests the HW default via a runtime parameter.
+ */
+ option = GRPH_COLOR_MATRIX_SW;
+
+ for (i = 0; i < ARRAY_SIZE(global_color_matrix); ++i) {
+ elm = &global_color_matrix[i];
+ if (elm->color_space != default_adjust->out_color_space)
+ continue;
+ /* program the matrix with default values from this
+ * file
+ */
+ program_color_matrix_v(xfm_dce, elm, option);
+ config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+ break;
+ }
+ }
+
+ program_input_csc(xfm, default_adjust->in_color_space);
+
+ /* Configure what we programmed:
+ * 1. Default values from this file
+ * 2. Use the hardware default from ROM_A, in which case we do not
+ *    need to program the matrix
+ */
+
+ configure_graphics_mode_v(xfm_dce, config,
+ default_adjust->csc_adjust_type,
+ default_adjust->out_color_space);
+
+ set_Denormalization(xfm, default_adjust->color_depth);
+}
+
+void dce110_opp_v_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ enum csc_color_mode config =
+ CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+
+ program_color_matrix_v(
+ xfm_dce, tbl_entry, GRPH_COLOR_MATRIX_SW);
+
+ /* We did everything, now program DxOUTPUT_CSC_CONTROL */
+ configure_graphics_mode_v(xfm_dce, config, GRAPHICS_CSC_ADJUST_TYPE_SW,
+ tbl_entry->color_space);
+
+ /*TODO: Check if denormalization is needed*/
+ /*set_Denormalization(opp, adjust->color_depth);*/
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
new file mode 100644
index 000000000000..e98ed3058ea2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dce110_transform_v.h"
+
+static void power_on_lut(struct transform *xfm,
+ bool power_on, bool inputgamma, bool regamma)
+{
+ uint32_t value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
+ int i;
+
+ if (power_on) {
+ if (inputgamma)
+ set_reg_field_value(
+ value,
+ 1,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
+ if (regamma)
+ set_reg_field_value(
+ value,
+ 1,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
+ } else {
+ if (inputgamma)
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
+ if (regamma)
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
+ }
+
+ dm_write_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL, value);
+
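+ /* Give the LUT memories a few polls (2 us apart) to report both
+ * power-disable bits set; fall through after three attempts.
+ */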
+ for (i = 0; i < 3; i++) {
+ value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
+ if (get_reg_field_value(value,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS) &&
+ get_reg_field_value(value,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS))
+ break;
+
+ udelay(2);
+ }
+}
+
+static void set_bypass_input_gamma(struct dce_transform *xfm_dce)
+{
+ uint32_t value;
+
+ value = dm_read_reg(xfm_dce->base.ctx,
+ mmCOL_MAN_INPUT_GAMMA_CONTROL1);
+
+ set_reg_field_value(
+ value,
+ 0,
+ COL_MAN_INPUT_GAMMA_CONTROL1,
+ INPUT_GAMMA_MODE);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmCOL_MAN_INPUT_GAMMA_CONTROL1, value);
+}
+
+static void configure_regamma_mode(struct dce_transform *xfm_dce, uint32_t mode)
+{
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ mode,
+ GAMMA_CORR_CONTROL,
+ GAMMA_CORR_MODE);
+
+ dm_write_reg(xfm_dce->base.ctx, mmGAMMA_CORR_CONTROL, value);
+}
+
+/*
+ *****************************************************************************
+ *  Function: regamma_config_regions_and_segments
+ *
+ *     Build the regamma curve from predefined hw points, using interface
+ *     parameters such as the EDID coefficients.
+ *
+ *  @param params: interface parameters
+ *  @return void
+ *****************************************************************************
+ */
+static void regamma_config_regions_and_segments(
+ struct dce_transform *xfm_dce, const struct pwl_params *params)
+{
+ const struct gamma_curve *curve;
+ uint32_t value = 0;
+
+ {
+ set_reg_field_value(
+ value,
+ params->arr_points[0].custom_float_x,
+ GAMMA_CORR_CNTLA_START_CNTL,
+ GAMMA_CORR_CNTLA_EXP_REGION_START);
+
+ set_reg_field_value(
+ value,
+ 0,
+ GAMMA_CORR_CNTLA_START_CNTL,
+ GAMMA_CORR_CNTLA_EXP_REGION_START_SEGMENT);
+
+ dm_write_reg(xfm_dce->base.ctx, mmGAMMA_CORR_CNTLA_START_CNTL,
+ value);
+ }
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ params->arr_points[0].custom_float_slope,
+ GAMMA_CORR_CNTLA_SLOPE_CNTL,
+ GAMMA_CORR_CNTLA_EXP_REGION_LINEAR_SLOPE);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_SLOPE_CNTL, value);
+ }
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ params->arr_points[1].custom_float_x,
+ GAMMA_CORR_CNTLA_END_CNTL1,
+ GAMMA_CORR_CNTLA_EXP_REGION_END);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_END_CNTL1, value);
+ }
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ params->arr_points[2].custom_float_slope,
+ GAMMA_CORR_CNTLA_END_CNTL2,
+ GAMMA_CORR_CNTLA_EXP_REGION_END_BASE);
+
+ set_reg_field_value(
+ value,
+ params->arr_points[1].custom_float_y,
+ GAMMA_CORR_CNTLA_END_CNTL2,
+ GAMMA_CORR_CNTLA_EXP_REGION_END_SLOPE);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_END_CNTL2, value);
+ }
+
+ curve = params->arr_curve_points;
+
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION0_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION0_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION1_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_0_1,
+ GAMMA_CORR_CNTLA_EXP_REGION1_NUM_SEGMENTS);
+
+ dm_write_reg(
+ xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_0_1,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION2_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION2_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION3_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_2_3,
+ GAMMA_CORR_CNTLA_EXP_REGION3_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_2_3,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION4_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION4_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION5_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_4_5,
+ GAMMA_CORR_CNTLA_EXP_REGION5_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_4_5,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION6_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION6_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION7_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_6_7,
+ GAMMA_CORR_CNTLA_EXP_REGION7_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_6_7,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION8_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION8_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION9_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_8_9,
+ GAMMA_CORR_CNTLA_EXP_REGION9_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_8_9,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION10_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION10_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION11_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_10_11,
+ GAMMA_CORR_CNTLA_EXP_REGION11_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_10_11,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION12_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION12_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION13_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_12_13,
+ GAMMA_CORR_CNTLA_EXP_REGION13_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_12_13,
+ value);
+ }
+
+ curve += 2;
+ {
+ value = 0;
+ set_reg_field_value(
+ value,
+ curve[0].offset,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION14_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[0].segments_num,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION14_NUM_SEGMENTS);
+
+ set_reg_field_value(
+ value,
+ curve[1].offset,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION15_LUT_OFFSET);
+
+ set_reg_field_value(
+ value,
+ curve[1].segments_num,
+ GAMMA_CORR_CNTLA_REGION_14_15,
+ GAMMA_CORR_CNTLA_EXP_REGION15_NUM_SEGMENTS);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_CNTLA_REGION_14_15,
+ value);
+ }
+}
+
+static void program_pwl(struct dce_transform *xfm_dce,
+ const struct pwl_params *params)
+{
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ 7,
+ GAMMA_CORR_LUT_WRITE_EN_MASK,
+ GAMMA_CORR_LUT_WRITE_EN_MASK);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_LUT_WRITE_EN_MASK, value);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmGAMMA_CORR_LUT_INDEX, 0);
+
+ /* Program REGAMMA_LUT_DATA */
+ {
+ const uint32_t addr = mmGAMMA_CORR_LUT_DATA;
+ uint32_t i = 0;
+ const struct pwl_result_data *rgb =
+ params->rgb_resulted;
+
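+ /* Each point is six consecutive writes to the LUT data port: R, G, B
+ * followed by their deltas; the index was reset to 0 above and is
+ * assumed to auto-increment on each write.
+ */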
+ while (i != params->hw_points_num) {
+ dm_write_reg(xfm_dce->base.ctx, addr, rgb->red_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr, rgb->green_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr, rgb->blue_reg);
+
+ dm_write_reg(xfm_dce->base.ctx, addr,
+ rgb->delta_red_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr,
+ rgb->delta_green_reg);
+ dm_write_reg(xfm_dce->base.ctx, addr,
+ rgb->delta_blue_reg);
+
+ ++rgb;
+ ++i;
+ }
+ }
+}
+
+void dce110_opp_program_regamma_pwl_v(
+ struct transform *xfm,
+ const struct pwl_params *params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ /* Setup regions */
+ regamma_config_regions_and_segments(xfm_dce, params);
+
+ set_bypass_input_gamma(xfm_dce);
+
+ /* Power on gamma LUT memory */
+ power_on_lut(xfm, true, false, true);
+
+ /* Program PWL */
+ program_pwl(xfm_dce, params);
+
+ /* program regamma config */
+ configure_regamma_mode(xfm_dce, 1);
+
+ /* Return LUT memory power control to auto */
+ power_on_lut(xfm, false, false, true);
+}
+
+void dce110_opp_power_on_regamma_lut_v(
+ struct transform *xfm,
+ bool power_on)
+{
+ uint32_t value = dm_read_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_FORCE);
+
+ set_reg_field_value(
+ value,
+ power_on,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_GAMMA_CORR_MEM_PWR_DIS);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_FORCE);
+
+ set_reg_field_value(
+ value,
+ power_on,
+ DCFEV_MEM_PWR_CTRL,
+ COL_MAN_INPUT_GAMMA_MEM_PWR_DIS);
+
+ dm_write_reg(xfm->ctx, mmDCFEV_MEM_PWR_CTRL, value);
+}
+
+void dce110_opp_set_regamma_mode_v(
+ struct transform *xfm,
+ enum opp_regamma mode)
+{
+ /* TODO: need to implement the function */
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c
new file mode 100644
index 000000000000..3545e43a4b77
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dce/dce_opp.h"
+#include "dce110_opp_v.h"
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static const struct opp_funcs funcs = {
+ .opp_set_dyn_expansion = dce110_opp_set_dyn_expansion,
+ .opp_destroy = dce110_opp_destroy,
+ .opp_program_fmt = dce110_opp_program_fmt,
+ .opp_program_bit_depth_reduction =
+ dce110_opp_program_bit_depth_reduction
+};
+
+void dce110_opp_v_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx)
+{
+ opp110->base.funcs = &funcs;
+
+ opp110->base.ctx = ctx;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h
new file mode 100644
index 000000000000..152af4c418cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h
@@ -0,0 +1,39 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCE110_V_H__
+#define __DC_OPP_DCE110_V_H__
+
+#include "dc_types.h"
+#include "opp.h"
+#include "core_types.h"
+
+void dce110_opp_v_construct(struct dce110_opp *opp110,
+ struct dc_context *ctx);
+
+/* underlay callbacks */
+
+
+
+#endif /* __DC_OPP_DCE110_V_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
new file mode 100644
index 000000000000..61adb8174ce0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -0,0 +1,1329 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "dce110/dce110_resource.h"
+
+#include "include/irq_service_interface.h"
+#include "dce/dce_audio.h"
+#include "dce110/dce110_timing_generator.h"
+#include "irq/dce110/irq_service_dce110.h"
+#include "dce110/dce110_timing_generator_v.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_mem_input.h"
+#include "dce110/dce110_mem_input_v.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce110/dce110_transform_v.h"
+#include "dce/dce_opp.h"
+#include "dce110/dce110_opp_v.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+#include "dce110/dce110_compressor.h"
+#endif
+
+#include "reg_helper.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_8_2_d.h"
+#include "gmc/gmc_8_2_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+ #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+ #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+ #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+ #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
+ #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
+ #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
+ #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
+ #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
+ #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
+#endif
+
+#ifndef DPHY_RX_FAST_TRAINING_CAPABLE
+ #define DPHY_RX_FAST_TRAINING_CAPABLE 0x1
+#endif
+
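+/* Per-instance CRTC/DCP register offsets, expressed relative to the
+ * instance-0 block so the shared timing generator code can address any pipe.
+ */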
+static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE110_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_110_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_110(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+/* AG TBD: needs to be reduced back to 3 pipes once the dce10 hw sequencer is implemented. */
+
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_100_110(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps carrizo_resource_cap = {
+ .num_timing_generator = 3,
+ .num_video_plane = 1,
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 2,
+};
+
+static const struct resource_caps stoney_resource_cap = {
+ .num_timing_generator = 2,
+ .num_video_plane = 1,
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 2,
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x4819
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce110_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce110_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct stream_encoder *dce110_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_stoney_reg = {
+ HWSEQ_ST_REG_LIST()
+};
+
+static const struct dce_hwseq_registers hwseq_cz_reg = {
+ HWSEQ_CZ_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE11_MASK_SH_LIST(__SHIFT),
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE11_MASK_SH_LIST(_MASK),
+};
+
+static struct dce_hwseq *dce110_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = ASIC_REV_IS_STONEY(ctx->asic_id.hw_internal_rev) ?
+ &hwseq_stoney_reg : &hwseq_cz_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ hws->wa.blnd_crtc_trigger = true;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce110_stream_encoder_create,
+ .create_hwseq = dce110_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE11_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE11_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE11_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+
+static struct mem_input *dce110_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 3;
+ return &dce_mi->base;
+}
+
+static void dce110_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce110_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ return &transform->base;
+}
+
+static struct input_pixel_processor *dce110_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 594000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+static struct link_encoder *dce110_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+static struct output_pixel_processor *dce110_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct clock_source *dce110_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce110_clock_source_destroy(struct clock_source **clk_src)
+{
+ struct dce110_clk_src *dce110_clk_src;
+
+ if (!clk_src)
+ return;
+
+ dce110_clk_src = TO_DCE110_CLK_SRC(*clk_src);
+
+ kfree(dce110_clk_src->dp_ss_params);
+ kfree(dce110_clk_src->hdmi_ss_params);
+ kfree(dce110_clk_src->dvi_ss_params);
+
+ kfree(dce110_clk_src);
+ *clk_src = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce110_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce110_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce110_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
+static void get_pixel_clock_parameters(
+ const struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+
+ /*TODO: is this halved for YCbCr 420? in that case we might want to move
+ * the pixel clock normalization for hdmi up to here instead of doing it
+ * in pll_adjust_pix_clk
+ */
+ pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
+ pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
+ /* TODO: un-hardcode */
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
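+ /* Assuming the usual DP conventions (LINK_RATE_LOW is RBR in units of
+ * 0.27 Gbps and the reference frequency is 270 MHz expressed in kHz),
+ * this hardcodes the requested symbol clock to 1.62 GHz.
+ */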
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
+ PIXEL_ENCODING_YCBCR420);
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ pixel_clk_params->color_depth = COLOR_DEPTH_888;
+ }
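+ /* YCbCr 4:2:0 carries two pixels per clock cycle, hence the halving of
+ * the requested pixel clock below.
+ */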
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ pixel_clk_params->requested_pix_clk = pixel_clk_params->requested_pix_clk / 2;
+ }
+}
+
+void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+}
+
+static bool is_surface_pixel_format_supported(struct pipe_ctx *pipe_ctx, unsigned int underlay_idx)
+{
+ if (pipe_ctx->pipe_idx != underlay_idx)
+ return true;
+ if (!pipe_ctx->plane_state)
+ return false;
+ if (pipe_ctx->plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ return true;
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (!is_surface_pixel_format_supported(pipe_ctx,
+ dc->res_pool->underlay_pipe_index))
+ return DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ /* TODO: validate audio ASIC caps, encoder */
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+static bool dce110_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ bool result = false;
+
+ dm_logger_write(
+ dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "%s: start",
+ __func__);
+
+ if (bw_calcs(
+ dc->ctx,
+ dc->bw_dceip,
+ dc->bw_vbios,
+ context->res_ctx.pipe_ctx,
+ dc->res_pool->pipe_count,
+ &context->bw.dce))
+ result = true;
+
+ if (!result)
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
+ "%s: %dx%d@%d Bandwidth validation failed!\n",
+ __func__,
+ context->streams[0]->timing.h_addressable,
+ context->streams[0]->timing.v_addressable,
+ context->streams[0]->timing.pix_clk_khz);
+
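+ /* Only dump the newly computed watermarks when they differ from the
+ * ones in the current state, to keep the log readable.
+ */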
+ if (memcmp(&dc->current_state->bw.dce,
+ &context->bw.dce, sizeof(context->bw.dce))) {
+ struct log_entry log_entry;
+ dm_logger_open(
+ dc->ctx->logger,
+ &log_entry,
+ LOG_BANDWIDTH_CALCS);
+ dm_logger_append(&log_entry, "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ __func__,
+ context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw.dce.stutter_mode_enable);
+ dm_logger_append(&log_entry,
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.cpuc_state_change_enable,
+ context->bw.dce.cpup_state_change_enable,
+ context->bw.dce.nbp_state_change_enable,
+ context->bw.dce.all_displays_in_sync,
+ context->bw.dce.dispclk_khz,
+ context->bw.dce.sclk_khz,
+ context->bw.dce.sclk_deep_sleep_khz,
+ context->bw.dce.yclk_khz,
+ context->bw.dce.blackout_recovery_time_us);
+ dm_logger_close(&log_entry);
+ }
+ return result;
+}
+
+static bool dce110_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i, j;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
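+ /* DCE11 drives at most one graphics plane plus one underlay (video)
+ * plane per stream, hence the limit of two.
+ */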
+ if (context->stream_status[i].plane_count > 2)
+ return false;
+
+ for (j = 0; j < context->stream_status[i].plane_count; j++) {
+ struct dc_plane_state *plane =
+ context->stream_status[i].plane_states[j];
+
+ /* underlay validation */
+ if (plane->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+
+ if ((plane->src_rect.width > 1920 ||
+ plane->src_rect.height > 1080))
+ return false;
+
+ /* irrespective of plane format,
+ * stream should be RGB encoded
+ */
+ if (context->streams[i]->timing.pixel_encoding
+ != PIXEL_ENCODING_RGB)
+ return false;
+
+ }
+
+ }
+ }
+
+ return true;
+}
+
+enum dc_status dce110_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce110_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static enum dc_status dce110_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+static enum dc_status dce110_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
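+ /* Duplicate the single stream across the context up to max_streams so
+ * that the guaranteed (worst-case) configuration is what gets validated.
+ */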
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ if (!dce110_validate_bandwidth(dc, context))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static struct pipe_ctx *dce110_acquire_underlay(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ struct dc *dc = stream->ctx->dc;
+ struct resource_context *res_ctx = &context->res_ctx;
+ unsigned int underlay_idx = pool->underlay_pipe_index;
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[underlay_idx];
+
+ if (res_ctx->pipe_ctx[underlay_idx].stream)
+ return NULL;
+
+ pipe_ctx->stream_res.tg = pool->timing_generators[underlay_idx];
+ pipe_ctx->plane_res.mi = pool->mis[underlay_idx];
+ /*pipe_ctx->plane_res.ipp = res_ctx->pool->ipps[underlay_idx];*/
+ pipe_ctx->plane_res.xfm = pool->transforms[underlay_idx];
+ pipe_ctx->stream_res.opp = pool->opps[underlay_idx];
+ pipe_ctx->pipe_idx = underlay_idx;
+
+ pipe_ctx->stream = stream;
+
+ if (!dc->current_state->res_ctx.pipe_ctx[underlay_idx].stream) {
+ struct tg_color black_color = {0};
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+
+ dc->hwss.enable_display_power_gating(
+ dc,
+ pipe_ctx->pipe_idx,
+ dcb, PIPE_GATING_CONTROL_DISABLE);
+
+ /*
+ * This is for powering on underlay, so crtc does not
+ * need to be enabled
+ */
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(pipe_ctx->stream_res.tg,
+ &stream->timing,
+ false);
+
+ pipe_ctx->stream_res.tg->funcs->enable_advanced_request(
+ pipe_ctx->stream_res.tg,
+ true,
+ &stream->timing);
+
+ pipe_ctx->plane_res.mi->funcs->allocate_mem_input(pipe_ctx->plane_res.mi,
+ stream->timing.h_total,
+ stream->timing.v_total,
+ stream->timing.pix_clk_khz,
+ context->stream_count);
+
+ color_space_to_black_color(dc,
+ COLOR_SPACE_YCBCR601, &black_color);
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+ }
+
+ return pipe_ctx;
+}
+
+static void dce110_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce110_res_pool_funcs = {
+ .destroy = dce110_destroy_resource_pool,
+ .link_enc_create = dce110_link_encoder_create,
+ .validate_guaranteed = dce110_validate_guaranteed,
+ .validate_bandwidth = dce110_validate_bandwidth,
+ .acquire_idle_pipe_for_layer = dce110_acquire_underlay,
+ .add_stream_to_ctx = dce110_add_stream_to_ctx,
+ .validate_global = dce110_validate_global
+};
+
+static bool underlay_create(struct dc_context *ctx, struct resource_pool *pool)
+{
+ struct dce110_timing_generator *dce110_tgv = kzalloc(sizeof(*dce110_tgv),
+ GFP_KERNEL);
+ struct dce_transform *dce110_xfmv = kzalloc(sizeof(*dce110_xfmv),
+ GFP_KERNEL);
+ struct dce_mem_input *dce110_miv = kzalloc(sizeof(*dce110_miv),
+ GFP_KERNEL);
+ struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv),
+ GFP_KERNEL);
+
+ if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) {
+ kfree(dce110_tgv);
+ kfree(dce110_xfmv);
+ kfree(dce110_miv);
+ kfree(dce110_oppv);
+ return false;
+ }
+
+ dce110_opp_v_construct(dce110_oppv, ctx);
+
+ dce110_timing_generator_v_construct(dce110_tgv, ctx);
+ dce110_mem_input_v_construct(dce110_miv, ctx);
+ dce110_transform_v_construct(dce110_xfmv, ctx);
+
+ pool->opps[pool->pipe_count] = &dce110_oppv->base;
+ pool->timing_generators[pool->pipe_count] = &dce110_tgv->base;
+ pool->mis[pool->pipe_count] = &dce110_miv->base;
+ pool->transforms[pool->pipe_count] = &dce110_xfmv->base;
+ pool->pipe_count++;
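+ /* The underlay occupies the pipe slot just past the regular pipes;
+ * underlay_pipe_index was set to this index in construct().
+ */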
+
+ /* update the public caps to indicate an underlay is available */
+ ctx->dc->caps.max_slave_planes = 1;
+
+ return true;
+}
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+ struct dm_pp_clock_levels clks = {0};
+
+ /*do system clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &clks);
+ /* convert all the clocks from kHz to fixed-point MHz */
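+ /* The eight sclk entries of the bandwidth calculation table (low,
+ * mid1-mid6, high) are filled by sampling the reported DPM levels at
+ * 1/8th increments.
+ */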
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1], 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels/8], 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*2/8], 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*3/8], 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*4/8], 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*5/8], 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*6/8], 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0], 1000);
+ dc->sclk_lvls = clks;
+
+ /*do display clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK,
+ &clks);
+ dc->bw_vbios->high_voltage_max_dispclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1], 1000);
+ dc->bw_vbios->mid_voltage_max_dispclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels>>1], 1000);
+ dc->bw_vbios->low_voltage_max_dispclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0], 1000);
+
+ /*do memory clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &clks);
+
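+ /* yclk (the memory clock used by the bandwidth calculations) is the
+ * reported MCLK level scaled by MEMORY_TYPE_MULTIPLIER, presumably to
+ * account for the memory data rate.
+ */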
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+}
+
+const struct resource_caps *dce110_resource_cap(
+ struct hw_asic_id *asic_id)
+{
+ if (ASIC_REV_IS_STONEY(asic_id->hw_internal_rev))
+ return &stoney_resource_cap;
+ else
+ return &carrizo_resource_cap;
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool,
+ struct hw_asic_id asic_id)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = dce110_resource_cap(&ctx->asic_id);
+ pool->base.funcs = &dce110_res_pool_funcs;
+
+ /*************************************************
+ *  Resource + asic cap hardcoding               *
+ *************************************************/
+
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ pool->base.underlay_pipe_index = pool->base.pipe_count;
+
+ dc->caps.max_downscale_ratio = 150;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
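+ /* When the VBIOS reports an external reference clock for DP, DP streams
+ * use it instead of a PLL, so only PLL0/PLL1 are registered for the
+ * remaining signal types.
+ */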
+ pool->base.dp_clock_source =
+ dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce110_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1,
+ &clk_src_regs[1], false);
+
+ pool->base.clk_src_count = 2;
+
+ /* TODO: find out if CZ supports 3 PLLs */
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce110_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ /* get static clock information for PPLIB or firmware, save
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce110_timing_generator_create(
+ ctx, i, &dce110_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce110_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce110_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce110_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce110_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_FBC)
+ dc->fbc_compressor = dce110_compressor_create(ctx);
+#endif
+ if (!underlay_create(ctx, &pool->base))
+ goto res_create_fail;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce110_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+ bw_calcs_data_update_from_pplib(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce110_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct hw_asic_id asic_id)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool, asic_id))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
new file mode 100644
index 000000000000..e5f168c1f8c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE110_H__
+#define __DC_RESOURCE_DCE110_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+#define TO_DCE110_RES_POOL(pool)\
+ container_of(pool, struct dce110_resource_pool, base)
+
+struct dce110_resource_pool {
+ struct resource_pool base;
+};
+
+void dce110_resource_build_pipe_hw_param(struct pipe_ctx *pipe_ctx);
+
+struct resource_pool *dce110_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct hw_asic_id asic_id);
+
+#endif /* __DC_RESOURCE_DCE110_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
new file mode 100644
index 000000000000..4befce6cd87a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
@@ -0,0 +1,1966 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dc_types.h"
+#include "dc_bios_types.h"
+#include "dc.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "dce110_timing_generator.h"
+
+#include "timing_generator.h"
+
+
+#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10
+
+#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
+#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1)
+
+#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
+#define DCP_REG(reg) (reg + tg110->offsets.dcp)
+
+/* The following register offsets are the same in
+ * dce/dce_11_0_d.h
+ * dce/vi_polaris10_p/vi_polaris10_d.h
+ *
+ * so the dce110 timing generator can be reused for both.
+ */
+
+
+/*
+ * apply_front_porch_workaround
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not been
+ * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for
+ * progressive.
+ */
+static void dce110_timing_generator_apply_front_porch_workaround(
+ struct timing_generator *tg,
+ struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: is_in_vertical_blank
+ *
+ * @brief
+ *   check the current status of the CRTC to determine whether we are in the
+ *   vertical blank region
+ *
+ * @return
+ * true if currently in blank region, false otherwise
+ *
+ *****************************************************************************
+ */
+static bool dce110_timing_generator_is_in_vertical_blank(
+ struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+ uint32_t field = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ addr = CRTC_REG(mmCRTC_STATUS);
+ value = dm_read_reg(tg->ctx, addr);
+ field = get_reg_field_value(value, CRTC_STATUS, CRTC_V_BLANK);
+ return field == 1;
+}
+
+void dce110_timing_generator_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ uint32_t regval;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t address = CRTC_REG(mmCRTC_CONTROL);
+
+ regval = dm_read_reg(tg->ctx, address);
+ set_reg_field_value(regval, early_cntl,
+ CRTC_CONTROL, CRTC_HBLANK_EARLY_CONTROL);
+ dm_write_reg(tg->ctx, address, regval);
+}
+
+/**
+ * Enable CRTC - call ASIC Control Object to enable Timing generator.
+ */
+bool dce110_timing_generator_enable_crtc(struct timing_generator *tg)
+{
+ enum bp_result result;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+
+ /*
+ * 3 is used to make sure V_UPDATE occurs at the beginning of the first
+ * line of vertical front porch
+ */
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_MASTER_UPDATE_MODE,
+ MASTER_UPDATE_MODE);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_MODE), value);
+
+ /* TODO: may want this on to catch underflow */
+ value = 0;
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK), value);
+
+ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, true);
+
+ return result == BP_RESULT_OK;
+}
+
+void dce110_timing_generator_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+/**
+ *****************************************************************************
+ * Function: disable_stereo
+ *
+ * @brief
+ * Disables active stereo on controller
+ *  Frame Packing needs to be disabled in VBlank or when the CRTC is not running
+ *****************************************************************************
+ */
+#if 0
+@TODOSTEREO
+static void disable_stereo(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_3D_STRUCTURE_CONTROL);
+ uint32_t value = 0;
+ uint32_t test = 0;
+ uint32_t field = 0;
+ uint32_t struc_en = 0;
+ uint32_t struc_stereo_sel_ovr = 0;
+
+ value = dm_read_reg(tg->ctx, addr);
+ struc_en = get_reg_field_value(
+ value,
+ CRTC_3D_STRUCTURE_CONTROL,
+ CRTC_3D_STRUCTURE_EN);
+
+ struc_stereo_sel_ovr = get_reg_field_value(
+ value,
+ CRTC_3D_STRUCTURE_CONTROL,
+ CRTC_3D_STRUCTURE_STEREO_SEL_OVR);
+
+ /*
+ * When disabling Frame Packing in 2 step mode, we need to program both
+ * registers at the same frame
+ * Programming it in the beginning of VActive makes sure we are ok
+ */
+
+ if (struc_en != 0 && struc_stereo_sel_ovr == 0) {
+ tg->funcs->wait_for_vblank(tg);
+ tg->funcs->wait_for_vactive(tg);
+ }
+
+ value = 0;
+ dm_write_reg(tg->ctx, addr, value);
+
+ addr = tg->regs[IDX_CRTC_STEREO_CONTROL];
+ dm_write_reg(tg->ctx, addr, value);
+}
+#endif
+
+/**
+ * disable_crtc - call ASIC Control Object to disable Timing generator.
+ */
+bool dce110_timing_generator_disable_crtc(struct timing_generator *tg)
+{
+ enum bp_result result;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, false);
+
+ /* Need to make sure stereo is disabled according to the DCE5.0 spec */
+
+ /*
+ * @TODOSTEREO call this when adding stereo support
+ * tg->funcs->disable_stereo(tg);
+ */
+
+ return result == BP_RESULT_OK;
+}
+
+/**
+* program_horz_count_by_2
+* Programs DxCRTC_HORZ_COUNT_BY2_EN - 1 for DVI 30bpp mode, 0 otherwise
+*
+*/
+static void program_horz_count_by_2(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t regval;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ regval = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_COUNT_CONTROL));
+
+ set_reg_field_value(regval, 0, CRTC_COUNT_CONTROL,
+ CRTC_HORZ_COUNT_BY2_EN);
+
+ if (timing->flags.HORZ_COUNT_BY_TWO)
+ set_reg_field_value(regval, 1, CRTC_COUNT_CONTROL,
+ CRTC_HORZ_COUNT_BY2_EN);
+
+ dm_write_reg(tg->ctx,
+ CRTC_REG(mmCRTC_COUNT_CONTROL), regval);
+}
+
+/**
+ * program_timing_generator
+ * Program CRTC Timing Registers - DxCRTC_H_*, DxCRTC_V_*, Pixel repetition.
+ * Call ASIC Control Object to program Timings.
+ */
+bool dce110_timing_generator_program_timing_generator(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *dc_crtc_timing)
+{
+ enum bp_result result;
+ struct bp_hw_crtc_timing_parameters bp_params;
+ struct dc_crtc_timing patched_crtc_timing;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ uint32_t vsync_offset = dc_crtc_timing->v_border_bottom +
+ dc_crtc_timing->v_front_porch;
+ uint32_t v_sync_start = dc_crtc_timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = dc_crtc_timing->h_border_right +
+ dc_crtc_timing->h_front_porch;
+ uint32_t h_sync_start = dc_crtc_timing->h_addressable + hsync_offset;
+
+ memset(&bp_params, 0, sizeof(struct bp_hw_crtc_timing_parameters));
+
+ /* Due to an asic bug we need to apply the Front Porch workaround prior
+ * to programming the timing.
+ */
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ dce110_timing_generator_apply_front_porch_workaround(tg, &patched_crtc_timing);
+
+ bp_params.controller_id = tg110->controller_id;
+
+ bp_params.h_total = patched_crtc_timing.h_total;
+ bp_params.h_addressable =
+ patched_crtc_timing.h_addressable;
+ bp_params.v_total = patched_crtc_timing.v_total;
+ bp_params.v_addressable = patched_crtc_timing.v_addressable;
+
+ bp_params.h_sync_start = h_sync_start;
+ bp_params.h_sync_width = patched_crtc_timing.h_sync_width;
+ bp_params.v_sync_start = v_sync_start;
+ bp_params.v_sync_width = patched_crtc_timing.v_sync_width;
+
+ /* Set overscan */
+ bp_params.h_overscan_left =
+ patched_crtc_timing.h_border_left;
+ bp_params.h_overscan_right =
+ patched_crtc_timing.h_border_right;
+ bp_params.v_overscan_top = patched_crtc_timing.v_border_top;
+ bp_params.v_overscan_bottom =
+ patched_crtc_timing.v_border_bottom;
+
+ /* Set flags */
+ if (patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY == 1)
+ bp_params.flags.HSYNC_POSITIVE_POLARITY = 1;
+
+ if (patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY == 1)
+ bp_params.flags.VSYNC_POSITIVE_POLARITY = 1;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1)
+ bp_params.flags.INTERLACE = 1;
+
+ if (patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1)
+ bp_params.flags.HORZ_COUNT_BY_TWO = 1;
+
+ result = tg->bp->funcs->program_crtc_timing(tg->bp, &bp_params);
+
+ program_horz_count_by_2(tg, &patched_crtc_timing);
+
+ tg110->base.funcs->enable_advanced_request(tg, true, &patched_crtc_timing);
+
+ /* Enable stereo - only when we need to pack 3D frame. Other types
+ * of stereo handled in explicit call */
+
+ return result == BP_RESULT_OK;
+}
+
+/**
+ *****************************************************************************
+ * Function: set_drr
+ *
+ * @brief
+ * Program dynamic refresh rate registers m_DxCRTC_V_TOTAL_*.
+ *
+ * @param [in] params: pointer to the drr_params struct describing the
+ *     requested vertical total range
+ *****************************************************************************
+ */
+void dce110_timing_generator_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params)
+{
+ /* register values */
+ uint32_t v_total_min = 0;
+ uint32_t v_total_max = 0;
+ uint32_t v_total_cntl = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ uint32_t addr = 0;
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
+ v_total_min = dm_read_reg(tg->ctx, addr);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
+ v_total_max = dm_read_reg(tg->ctx, addr);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
+ v_total_cntl = dm_read_reg(tg->ctx, addr);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
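+ /* The V_TOTAL_MIN/MAX registers hold the line count minus one, matching
+ * how CRTC_V_TOTAL itself is programmed.
+ */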
+ set_reg_field_value(v_total_max,
+ params->vertical_total_max - 1,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
+
+ set_reg_field_value(v_total_min,
+ params->vertical_total_min - 1,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+
+ set_reg_field_value(v_total_cntl,
+ 1,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MIN_SEL);
+
+ set_reg_field_value(v_total_cntl,
+ 1,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MAX_SEL);
+
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_ON_EVENT);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_TO_MASTER_VSYNC);
+
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_SET_V_TOTAL_MIN_MASK_EN);
+
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_SET_V_TOTAL_MIN_MASK);
+ } else {
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_SET_V_TOTAL_MIN_MASK);
+ set_reg_field_value(v_total_min,
+ 0,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+ set_reg_field_value(v_total_max,
+ 0,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MIN_SEL);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_V_TOTAL_MAX_SEL);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_ON_EVENT);
+ set_reg_field_value(v_total_cntl,
+ 0,
+ CRTC_V_TOTAL_CONTROL,
+ CRTC_FORCE_LOCK_TO_MASTER_VSYNC);
+ }
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
+ dm_write_reg(tg->ctx, addr, v_total_min);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
+ dm_write_reg(tg->ctx, addr, v_total_max);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_CONTROL);
+ dm_write_reg(tg->ctx, addr, v_total_cntl);
+}
+
+void dce110_timing_generator_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t static_screen_cntl = 0;
+ uint32_t addr = 0;
+
+ addr = CRTC_REG(mmCRTC_STATIC_SCREEN_CONTROL);
+ static_screen_cntl = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(static_screen_cntl,
+ value,
+ CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK);
+
+ set_reg_field_value(static_screen_cntl,
+ 2,
+ CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_FRAME_COUNT);
+
+ dm_write_reg(tg->ctx, addr, static_screen_cntl);
+}
+
+/*
+ * get_vblank_counter
+ *
+ * @brief
+ *   Get counter for vertical blanks. Uses register CRTC_STATUS_FRAME_COUNT,
+ *   which holds the counter of frames.
+ *
+ * @param
+ * struct timing_generator *tg - [in] timing generator which controls the
+ * desired CRTC
+ *
+ * @return
+ * Counter of frames, which should equal to number of vblanks.
+ */
+uint32_t dce110_timing_generator_get_vblank_counter(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_STATUS_FRAME_COUNT);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+ uint32_t field = get_reg_field_value(
+ value, CRTC_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
+
+ return field;
+}
+
+/**
+ *****************************************************************************
+ * Function: dce110_timing_generator_get_position
+ *
+ * @brief
+ * Returns CRTC vertical/horizontal counters
+ *
+ * @param [out] position
+ *****************************************************************************
+ */
+void dce110_timing_generator_get_position(struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_STATUS_POSITION));
+
+ position->horizontal_count = get_reg_field_value(
+ value,
+ CRTC_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ position->vertical_count = get_reg_field_value(
+ value,
+ CRTC_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_NOM_VERT_POSITION));
+
+ position->nominal_vcount = get_reg_field_value(
+ value,
+ CRTC_NOM_VERT_POSITION,
+ CRTC_VERT_COUNT_NOM);
+}
+
+/**
+ *****************************************************************************
+ * Function: get_crtc_scanoutpos
+ *
+ * @brief
+ *   Returns the CRTC vertical blank start/end lines and the current
+ *   vertical/horizontal counters
+ *
+ * @param [out] v_blank_start, v_blank_end, h_position, v_position
+ *****************************************************************************
+ */
+void dce110_timing_generator_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ struct crtc_position position;
+
+ uint32_t value = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_BLANK_START_END));
+
+ *v_blank_start = get_reg_field_value(value,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+ *v_blank_end = get_reg_field_value(value,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ dce110_timing_generator_get_position(
+ tg, &position);
+
+ *h_position = position.horizontal_count;
+ *v_position = position.vertical_count;
+}
+
+/* TODO: is it safe to assume that mask/shift of Primary and Underlay
+ * are the same?
+ * For example: today CRTC_H_TOTAL == CRTCV_H_TOTAL but is it always
+ * guaranteed? */
+void dce110_timing_generator_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t vsync_offset = timing->v_border_bottom +
+ timing->v_front_porch;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr = 0;
+ uint32_t tmp = 0;
+
+ addr = CRTC_REG(mmCRTC_H_TOTAL);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->h_total - 1,
+ CRTC_H_TOTAL,
+ CRTC_H_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ /* In case V_TOTAL_CONTROL is on, make sure V_TOTAL_MAX and
+ * V_TOTAL_MIN are equal to V_TOTAL.
+ */
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MAX);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX);
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_V_TOTAL_MIN);
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN);
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_H_BLANK_START_END);
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->h_total -
+ (h_sync_start + timing->h_border_left);
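+ /* For example, with a standard CEA 1080p60 timing (h_total 2200,
+ * h_addressable 1920, h_front_porch 88, h_sync_width 44, no borders):
+ * h_sync_start = 2008, so H_BLANK_END = 2200 - 2008 = 192 (sync plus
+ * back porch) and H_BLANK_START = 192 + 1920 = 2112.
+ */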
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_H_BLANK_START_END,
+ CRTC_H_BLANK_END);
+
+ tmp = tmp + timing->h_addressable +
+ timing->h_border_left + timing->h_border_right;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_H_BLANK_START_END,
+ CRTC_H_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_V_BLANK_START_END);
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->v_total - (v_sync_start + timing->v_border_top);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ tmp = tmp + timing->v_addressable + timing->v_border_top +
+ timing->v_border_bottom;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+}
+
+void dce110_timing_generator_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value;
+ uint32_t addr;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+ /* color ramp generator mixes 16-bit color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
+
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN);
+
+ set_reg_field_value(
+ value,
+ mode,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_MODE);
+
+ set_reg_field_value(
+ value,
+ dyn_range,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE);
+ set_reg_field_value(
+ value,
+ bit_depth,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_COLOR_FORMAT);
+ dm_write_reg(ctx, addr, value);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* CRTC_TEST_PATTERN_DATA has 16 bits,
+ * lowest 6 are hardwired to ZERO
+ * color bits should be left aligned to MSB
+ * XXXXXXXXXX000000 for 10 bit,
+ * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 bit
+ */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
+
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
+ dm_write_reg(ctx, addr, value);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+ * we will make a loop of 6 in which we prepare the mask,
+ * then write, then prepare the color for the next write.
+ * The first iteration writes the mask only; each subsequent
+ * iteration writes the color prepared in the previous
+ * iteration within the new mask. The last component is
+ * written separately: the mask does not change between the
+ * 6th and 7th write, and its color is prepared by the last
+ * iteration of the loop.
+ */
+
+ /* write color, color values mask in CRTC_TEST_PATTERN_MASK
+ * is B1, G1, R1, B0, G0, R0
+ */
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
+ for (index = 0; index < 6; index++) {
+ /* prepare color mask, first write PATTERN_DATA
+ * will have all zeros
+ */
+ set_reg_field_value(
+ value,
+ (1 << index),
+ CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_MASK);
+ /* write color component */
+ dm_write_reg(ctx, addr, value);
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ set_reg_field_value(
+ value,
+ dst_color[index],
+ CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_DATA);
+ }
+ /* write last color component,
+ * it's been already prepared in the loop
+ */
+ dm_write_reg(ctx, addr, value);
+
+ /* enable test pattern */
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN);
+
+ set_reg_field_value(
+ value,
+ mode,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_MODE);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE);
+
+ set_reg_field_value(
+ value,
+ bit_depth,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_COLOR_FORMAT);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* increment of the first ramp for one color gradation:
+ * one gradation at the destination bit depth corresponds to
+ * 2^(src_bpc - dst_bpc) gradations in 16-bit color
+ */
+ inc_base = (src_bpc - dst_bpc);
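+ /* e.g. for a 6 bpc destination: inc_base = 16 - 6 = 10, so each ramp
+ * step advances by 2^10 in the 16-bit color space.
+ */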
+
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ set_reg_field_value(
+ value,
+ inc_base,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC1);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ set_reg_field_value(
+ value,
+ inc_base,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC1);
+ set_reg_field_value(
+ value,
+ 8,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+ set_reg_field_value(
+ value,
+ 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ set_reg_field_value(
+ value,
+ inc_base,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0);
+ set_reg_field_value(
+ value,
+ inc_base + 2,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC1);
+ set_reg_field_value(
+ value,
+ 8,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_HRES);
+ set_reg_field_value(
+ value,
+ 5,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES);
+ set_reg_field_value(
+ value,
+ 384 << 6,
+ CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET);
+ }
+ break;
+ default:
+ break;
+ }
+ dm_write_reg(ctx, addr, value);
+
+ value = 0;
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_COLOR);
+ dm_write_reg(ctx, addr, value);
+
+ /* enable test pattern */
+ addr = CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL);
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN);
+
+ set_reg_field_value(
+ value,
+ mode,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_MODE);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE);
+ /* add color depth translation here */
+ set_reg_field_value(
+ value,
+ bit_depth,
+ CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_COLOR_FORMAT);
+
+ dm_write_reg(ctx, addr, value);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ value = 0;
+ dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_CONTROL), value);
+ dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_COLOR), value);
+ dm_write_reg(ctx, CRTC_REG(mmCRTC_TEST_PATTERN_PARAMETERS),
+ value);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+* dce110_timing_generator_validate_timing
+* The timing generators support a maximum display size of 8192 x 8192 pixels,
+* including both active display and blanking periods. Check H Total and V Total.
+*/
+bool dce110_timing_generator_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ enum signal_type signal)
+{
+ uint32_t h_blank;
+ uint32_t h_back_porch, hsync_offset, h_sync_start;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ ASSERT(timing != NULL);
+
+ if (!timing)
+ return false;
+
+ hsync_offset = timing->h_border_right + timing->h_front_porch;
+ h_sync_start = timing->h_addressable + hsync_offset;
+
+ /* Currently we don't support 3D, so block all 3D timings */
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
+ return false;
+
+ /* Temporarily blocking interlacing mode until it's supported */
+ if (timing->flags.INTERLACE == 1)
+ return false;
+
+ /* Check the maximum number of pixels supported by the Timing Generator
+ * (currently this can never fail: it would require a display with more
+ * than 8192 horizontal or more than 8192 vertical total pixels)
+ */
+ if (timing->h_total > tg110->max_h_total ||
+ timing->v_total > tg110->max_v_total)
+ return false;
+
+ h_blank = (timing->h_total - timing->h_addressable -
+ timing->h_border_right -
+ timing->h_border_left);
+
+ if (h_blank < tg110->min_h_blank)
+ return false;
+
+ if (timing->h_front_porch < tg110->min_h_front_porch)
+ return false;
+
+ h_back_porch = h_blank - (h_sync_start -
+ timing->h_addressable -
+ timing->h_border_right -
+ timing->h_sync_width);
+
+ if (h_back_porch < tg110->min_h_back_porch)
+ return false;
+
+ return true;
+}
+
+/**
+* Wait till we are at the beginning of VBlank.
+*/
+void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg)
+{
+ /* We want to catch the beginning of VBlank here, so if we are already in
+ * VBlank on the first check we might be very close to VActive; in that
+ * case wait for another frame
+ */
+ while (dce110_timing_generator_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+
+ while (!dce110_timing_generator_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/**
+* Wait till we are in VActive (anywhere in VActive)
+*/
+void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg)
+{
+ while (dce110_timing_generator_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: dce110_timing_generator_setup_global_swap_lock
+ *
+ * @brief
+ *   Sets up the Global Swap Lock group for the current pipe.
+ *   A pipe can join or leave a GSL group, and become a TimingServer or TimingClient
+ *
+ * @param [in] gsl_params: setup data
+ *****************************************************************************
+ */
+
+void dce110_timing_generator_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);
+ uint32_t check_point = FLIP_READY_BACK_LOOKUP;
+
+ value = dm_read_reg(tg->ctx, address);
+
+ /* This pipe will belong to GSL Group zero. */
+ set_reg_field_value(value,
+ 1,
+ DCP_GSL_CONTROL,
+ DCP_GSL0_EN);
+
+ set_reg_field_value(value,
+ gsl_params->gsl_master == tg->inst,
+ DCP_GSL_CONTROL,
+ DCP_GSL_MASTER_EN);
+
+ set_reg_field_value(value,
+ HFLIP_READY_DELAY,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
+
+ /* Keep signal low (pending high) during 6 lines.
+ * Also defines minimum interval before re-checking signal. */
+ set_reg_field_value(value,
+ HFLIP_CHECK_DELAY,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
+
+
+ {
+ uint32_t value_crtc_vtotal;
+
+ value_crtc_vtotal = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_TOTAL));
+
+ set_reg_field_value(value,
+ 0,/* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ DCP_GSL_CONTROL,
+ DCP_GSL_SYNC_SOURCE);
+
+ /* Checkpoint relative to end of frame */
+ check_point = get_reg_field_value(value_crtc_vtotal,
+ CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_GSL_WINDOW), 0);
+ }
+
+ set_reg_field_value(value,
+ 1,
+ DCP_GSL_CONTROL,
+ DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
+
+ dm_write_reg(tg->ctx, address, value);
+
+ /********************************************************************/
+ address = CRTC_REG(mmCRTC_GSL_CONTROL);
+
+ value = 0;
+ set_reg_field_value(value,
+ check_point - FLIP_READY_BACK_LOOKUP,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM);
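+ /* i.e. the GSL flip-ready check happens FLIP_READY_BACK_LOOKUP lines
+ * before the end of the frame (check_point was read from CRTC_V_TOTAL).
+ */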
+
+ set_reg_field_value(value,
+ VFLIP_READY_DELAY,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_FORCE_DELAY);
+
+ dm_write_reg(tg->ctx, address, value);
+}
+
+void dce110_timing_generator_tear_down_global_swap_lock(
+ struct timing_generator *tg)
+{
+ /* Clear all the register writes done by
+ * dce110_timing_generator_setup_global_swap_lock
+ */
+
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t address = DCP_REG(mmDCP_GSL_CONTROL);
+
+ value = 0;
+
+ /* This pipe will belong to GSL Group zero. */
+ /* Setting HW default values from reg specs */
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL0_EN);
+
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL_MASTER_EN);
+
+ set_reg_field_value(value,
+ 0x2,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_FORCE_DELAY);
+
+ set_reg_field_value(value,
+ 0x6,
+ DCP_GSL_CONTROL,
+ DCP_GSL_HSYNC_FLIP_CHECK_DELAY);
+
+ /* Restore DCP_GSL_PURPOSE_SURFACE_FLIP */
+ {
+ uint32_t value_crtc_vtotal;
+
+ value_crtc_vtotal = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_TOTAL));
+
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL_SYNC_SOURCE);
+ }
+
+ set_reg_field_value(value,
+ 0,
+ DCP_GSL_CONTROL,
+ DCP_GSL_DELAY_SURFACE_UPDATE_PENDING);
+
+ dm_write_reg(tg->ctx, address, value);
+
+ /********************************************************************/
+ address = CRTC_REG(mmCRTC_GSL_CONTROL);
+
+ value = 0;
+ set_reg_field_value(value,
+ 0,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM);
+
+ set_reg_field_value(value,
+ 0x2,
+ CRTC_GSL_CONTROL,
+ CRTC_GSL_FORCE_DELAY);
+
+ dm_write_reg(tg->ctx, address, value);
+}
+/**
+ *****************************************************************************
+ * Function: is_counter_moving
+ *
+ * @brief
+ *   check if the timing generator is currently running
+ *
+ * @return
+ *   true if currently running, false if currently paused or stopped.
+ *
+ *****************************************************************************
+ */
+bool dce110_timing_generator_is_counter_moving(struct timing_generator *tg)
+{
+ struct crtc_position position1, position2;
+
+ tg->funcs->get_position(tg, &position1);
+ tg->funcs->get_position(tg, &position2);
+
+ if (position1.horizontal_count == position2.horizontal_count &&
+ position1.vertical_count == position2.vertical_count)
+ return false;
+ else
+ return true;
+}
+
+void dce110_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (enable) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PROGRESSIVE_START_LINE_EARLY);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_INTERLACE_START_LINE_EARLY);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+/*TODO: Figure out if we need this function. */
+void dce110_timing_generator_set_lock_master(struct timing_generator *tg,
+ bool lock)
+{
+ struct dc_context *ctx = tg->ctx;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_MASTER_UPDATE_LOCK);
+ uint32_t value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ lock ? 1 : 0,
+ CRTC_MASTER_UPDATE_LOCK,
+ MASTER_UPDATE_LOCK);
+
+ dm_write_reg(ctx, addr, value);
+}
+
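+/* Route the GSL group 0 event to CRTC TRIGB and force the CRTC counters
+ * to H_TOTAL/V_TOTAL when the trigger fires.
+ */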
+void dce110_timing_generator_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source_tg_inst)
+{
+ uint32_t value;
+ uint32_t rising_edge = 0;
+ uint32_t falling_edge = 0;
+ enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Setup trigger edge */
+ {
+ uint32_t pol_value = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_V_SYNC_A_CNTL));
+
+ /* Register spec has reversed definition:
+ * 0 for positive, 1 for negative */
+ if (get_reg_field_value(pol_value,
+ CRTC_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL) == 0) {
+ rising_edge = 1;
+ } else {
+ falling_edge = 1;
+ }
+ }
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
+
+ trig_src_select = TRIGGER_SOURCE_SELECT_GSL_GROUP0;
+
+ set_reg_field_value(value,
+ trig_src_select,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT);
+
+ set_reg_field_value(value,
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_POLARITY_SELECT);
+
+ set_reg_field_value(value,
+ rising_edge,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_RISING_EDGE_DETECT_CNTL);
+
+ set_reg_field_value(value,
+ falling_edge,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL);
+
+ set_reg_field_value(value,
+ 0, /* send every signal */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_FREQUENCY_SELECT);
+
+ set_reg_field_value(value,
+ 0, /* no delay */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_DELAY);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
+
+ /**************************************************************/
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ set_reg_field_value(value,
+ 2, /* force H count to H_TOTAL and V count to V_TOTAL */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE);
+
+ set_reg_field_value(value,
+ 1, /* TriggerB - we never use TriggerA */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_TRIG_SEL);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
+}
+
+void dce110_timing_generator_disable_reset_trigger(
+ struct timing_generator *tg)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ set_reg_field_value(value,
+ 0, /* force counter now mode is disabled */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL), value);
+
+ /********************************************************************/
+ value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL));
+
+ set_reg_field_value(value,
+ TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT);
+
+ set_reg_field_value(value,
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_POLARITY_SELECT);
+
+ set_reg_field_value(value,
+ 1, /* clear trigger status */
+ CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_CLEAR);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_TRIGB_CNTL), value);
+}
+
+/**
+ *****************************************************************************
+ * @brief
+ * Checks whether CRTC triggered reset occurred
+ *
+ * @return
+ * true if triggered reset occurred, false otherwise
+ *****************************************************************************
+ */
+bool dce110_timing_generator_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg(tg->ctx,
+ CRTC_REG(mmCRTC_FORCE_COUNT_NOW_CNTL));
+
+ return get_reg_field_value(value,
+ CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
+}
+
+/**
+ * dce110_timing_generator_disable_vga
+ * Turn off VGA mode and timing (DxVGA_CONTROL).
+ * VGA mode and timing are used by the VBIOS on CRT monitors.
+ */
+void dce110_timing_generator_disable_vga(
+ struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ switch (tg110->controller_id) {
+ case CONTROLLER_ID_D0:
+ addr = mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D1:
+ addr = mmD2VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D2:
+ addr = mmD3VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D3:
+ addr = mmD4VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D4:
+ addr = mmD5VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D5:
+ addr = mmD6VGA_CONTROL;
+ break;
+ default:
+ break;
+ }
+ value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_MODE_ENABLE);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_TIMING_SELECT);
+ set_reg_field_value(
+ value, 0, D1VGA_CONTROL, D1VGA_SYNC_POLARITY_SELECT);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_OVERSCAN_COLOR_EN);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+/**
+* set_overscan_color_black
+*
+* @param color: the black color for the current color space
+* This routine sets the overscan color to black according to the color space.
+* @return none
+*/
+
+void dce110_timing_generator_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t addr;
+ uint32_t value = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ set_reg_field_value(
+ value,
+ color->color_b_cb,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ color->color_r_cr,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ set_reg_field_value(
+ value,
+ color->color_g_y,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
+ dm_write_reg(ctx, addr, value);
+ addr = CRTC_REG(mmCRTC_BLACK_COLOR);
+ dm_write_reg(ctx, addr, value);
+ /* This is desirable to have a constant DAC output voltage during the
+ * blank time that is higher than the 0 volt reference level that the
+ * DAC outputs when the NBLANK signal
+ * is asserted low, such as for output to an analog TV. */
+ addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
+ dm_write_reg(ctx, addr, value);
+
+	/* TODO: we have to program the EXT registers, and we need to know the LB
+	 * DATA format because it is used with more than 10, i.e. 12, bits per color
+ *
+ * m_mmDxCRTC_OVERSCAN_COLOR_EXT
+ * m_mmDxCRTC_BLACK_COLOR_EXT
+ * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
+ */
+
+}
+
+void dce110_tg_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_BLACK_COLOR);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+
+ addr = CRTC_REG(mmCRTC_BLANK_DATA_COLOR);
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+void dce110_tg_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_b_cb,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_g_y,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_r_cr,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ addr = CRTC_REG(mmCRTC_OVERSCAN_COLOR);
+ dm_write_reg(ctx, addr, value);
+}
+
+void dce110_tg_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (use_vbios)
+ dce110_timing_generator_program_timing_generator(tg, timing);
+ else
+ dce110_timing_generator_program_blanking(tg, timing);
+}
+
+bool dce110_tg_is_blanked(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL));
+
+ if (get_reg_field_value(
+ value,
+ CRTC_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN) == 1 &&
+ get_reg_field_value(
+ value,
+ CRTC_BLANK_CONTROL,
+ CRTC_CURRENT_BLANK_STATE) == 1)
+ return true;
+ return false;
+}
+
+void dce110_tg_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+
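+	/* Enable double buffering of the blank data bit before changing it. */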
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_DOUBLE_BUFFER_CONTROL,
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_DOUBLE_BUFFER_CONTROL), value);
+ value = 0;
+
+ if (enable_blanking) {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), value);
+
+ } else
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_BLANK_CONTROL), 0);
+}
+
+bool dce110_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ return dce110_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
+}
+
+void dce110_tg_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ dce110_timing_generator_wait_for_vblank(tg);
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ dce110_timing_generator_wait_for_vactive(tg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void dce110_tg_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color)
+{
+ if (blank_color != NULL)
+ dce110_tg_program_blank_color(tg, blank_color);
+ if (overscan_color != NULL)
+ dce110_tg_set_overscan_color(tg, overscan_color);
+}
+
+/* Gets the first line of the blank region of the CRTC display timing
+ * and programs it as the trigger to fire the vertical interrupt
+ */
+bool dce110_arm_vert_intr(struct timing_generator *tg, uint8_t width)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t v_blank_start = 0;
+ uint32_t v_blank_end = 0;
+ uint32_t val = 0;
+ uint32_t h_position, v_position;
+
+ tg->funcs->get_scanoutpos(
+ tg,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ if (v_blank_start == 0 || v_blank_end == 0)
+ return false;
+
+ set_reg_field_value(
+ val,
+ v_blank_start,
+ CRTC_VERTICAL_INTERRUPT0_POSITION,
+ CRTC_VERTICAL_INTERRUPT0_LINE_START);
+
+	/* Set the interval width over which the interrupt fires, in scanlines */
+ set_reg_field_value(
+ val,
+ v_blank_start + width,
+ CRTC_VERTICAL_INTERRUPT0_POSITION,
+ CRTC_VERTICAL_INTERRUPT0_LINE_END);
+
+ dm_write_reg(tg->ctx, CRTC_REG(mmCRTC_VERTICAL_INTERRUPT0_POSITION), val);
+
+ return true;
+}
+
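+/* DCE11.0 timing generator implementation of the shared
+ * timing_generator interface. */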
+static const struct timing_generator_funcs dce110_tg_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = dce110_tg_program_timing,
+ .enable_crtc = dce110_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+ .get_position = dce110_timing_generator_get_position,
+ .get_frame_count = dce110_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce110_timing_generator_set_early_control,
+ .wait_for_state = dce110_tg_wait_for_state,
+ .set_blank = dce110_tg_set_blank,
+ .is_blanked = dce110_tg_is_blanked,
+ .set_colors = dce110_tg_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_program_blank_color,
+ .disable_vga = dce110_timing_generator_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_tear_down_global_swap_lock,
+ .enable_advanced_request =
+ dce110_timing_generator_enable_advanced_request,
+ .set_drr =
+ dce110_timing_generator_set_drr,
+ .set_static_screen_control =
+ dce110_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce110_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce110_arm_vert_intr,
+};
+
+void dce110_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+
+ tg110->offsets = *offsets;
+
+ tg110->base.funcs = &dce110_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
new file mode 100644
index 000000000000..82737dea6984
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE110_H__
+#define __DC_TIMING_GENERATOR_DCE110_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+
+/* GSL Sync related values */
+
+/* In VSync mode, the master pipe generates the flip_ready signal
+ * after 4 units of time */
+#define VFLIP_READY_DELAY 4
+/* In HSync mode, the master pipe generates the flip_ready signal
+ * after 2 units of time */
+#define HFLIP_READY_DELAY 2
+/* 6-line delay between forcing the flip and checking that all pipes are ready */
+#define HFLIP_CHECK_DELAY 6
+/* 3 lines before end of frame */
+#define FLIP_READY_BACK_LOOKUP 3
+
+/* Trigger Source Select - ASIC-dependent, actual values for the
+ * register programming */
+enum trigger_source_select {
+ TRIGGER_SOURCE_SELECT_LOGIC_ZERO = 0,
+ TRIGGER_SOURCE_SELECT_CRTC_VSYNCA = 1,
+ TRIGGER_SOURCE_SELECT_CRTC_HSYNCA = 2,
+ TRIGGER_SOURCE_SELECT_CRTC_VSYNCB = 3,
+ TRIGGER_SOURCE_SELECT_CRTC_HSYNCB = 4,
+ TRIGGER_SOURCE_SELECT_GENERICF = 5,
+ TRIGGER_SOURCE_SELECT_GENERICE = 6,
+ TRIGGER_SOURCE_SELECT_VSYNCA = 7,
+ TRIGGER_SOURCE_SELECT_HSYNCA = 8,
+ TRIGGER_SOURCE_SELECT_VSYNCB = 9,
+ TRIGGER_SOURCE_SELECT_HSYNCB = 10,
+ TRIGGER_SOURCE_SELECT_HPD1 = 11,
+ TRIGGER_SOURCE_SELECT_HPD2 = 12,
+ TRIGGER_SOURCE_SELECT_GENERICD = 13,
+ TRIGGER_SOURCE_SELECT_GENERICC = 14,
+ TRIGGER_SOURCE_SELECT_VIDEO_CAPTURE = 15,
+ TRIGGER_SOURCE_SELECT_GSL_GROUP0 = 16,
+ TRIGGER_SOURCE_SELECT_GSL_GROUP1 = 17,
+ TRIGGER_SOURCE_SELECT_GSL_GROUP2 = 18,
+ TRIGGER_SOURCE_SELECT_BLONY = 19,
+ TRIGGER_SOURCE_SELECT_GENERICA = 20,
+ TRIGGER_SOURCE_SELECT_GENERICB = 21,
+ TRIGGER_SOURCE_SELECT_GSL_ALLOW_FLIP = 22,
+ TRIGGER_SOURCE_SELECT_MANUAL_TRIGGER = 23
+};
+
+/* Trigger Polarity Select - ASIC-dependent, actual values for the
+ * register programming */
+enum trigger_polarity_select {
+ TRIGGER_POLARITY_SELECT_LOGIC_ZERO = 0,
+ TRIGGER_POLARITY_SELECT_CRTC = 1,
+ TRIGGER_POLARITY_SELECT_GENERICA = 2,
+ TRIGGER_POLARITY_SELECT_GENERICB = 3,
+ TRIGGER_POLARITY_SELECT_HSYNCA = 4,
+ TRIGGER_POLARITY_SELECT_HSYNCB = 5,
+ TRIGGER_POLARITY_SELECT_VIDEO_CAPTURE = 6,
+ TRIGGER_POLARITY_SELECT_GENERICC = 7
+};
+
+
+struct dce110_timing_generator_offsets {
+ int32_t crtc;
+ int32_t dcp;
+
+ /* DCE80 use only */
+ int32_t dmif;
+};
+
+struct dce110_timing_generator {
+ struct timing_generator base;
+ struct dce110_timing_generator_offsets offsets;
+ struct dce110_timing_generator_offsets derived_offsets;
+
+ enum controller_id controller_id;
+
+ uint32_t max_h_total;
+ uint32_t max_v_total;
+
+ uint32_t min_h_blank;
+ uint32_t min_h_front_porch;
+ uint32_t min_h_back_porch;
+
+ /* DCE 12 */
+ uint32_t min_h_sync_width;
+ uint32_t min_v_sync_width;
+ uint32_t min_v_blank;
+
+};
+
+#define DCE110TG_FROM_TG(tg)\
+ container_of(tg, struct dce110_timing_generator, base)
+
+void dce110_timing_generator_construct(
+ struct dce110_timing_generator *tg,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+/* determine if given timing can be supported by TG */
+bool dce110_timing_generator_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ enum signal_type signal);
+
+/******** HW programming ************/
+
+/* Program timing generator with given timing */
+bool dce110_timing_generator_program_timing_generator(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *dc_crtc_timing);
+
+/* Disable/Enable Timing Generator */
+bool dce110_timing_generator_enable_crtc(struct timing_generator *tg);
+bool dce110_timing_generator_disable_crtc(struct timing_generator *tg);
+
+void dce110_timing_generator_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl);
+
+/**************** TG current status ******************/
+
+/* return the current frame counter. Used by Linux kernel DRM */
+uint32_t dce110_timing_generator_get_vblank_counter(
+ struct timing_generator *tg);
+
+void dce110_timing_generator_get_position(
+ struct timing_generator *tg,
+ struct crtc_position *position);
+
+/* Return true if the TG counter is moving, false if the TG is stopped */
+bool dce110_timing_generator_is_counter_moving(struct timing_generator *tg);
+
+/* wait until TG is in beginning of vertical blank region */
+void dce110_timing_generator_wait_for_vblank(struct timing_generator *tg);
+
+/* wait until TG is in beginning of active region */
+void dce110_timing_generator_wait_for_vactive(struct timing_generator *tg);
+
+/*********** Timing Generator Synchronization routines ****/
+
+/* Sets up the Global Swap Lock group (TimingServer or TimingClient) */
+void dce110_timing_generator_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params);
+
+/* Clear all the register writes done by setup_global_swap_lock */
+void dce110_timing_generator_tear_down_global_swap_lock(
+ struct timing_generator *tg);
+
+/* Reset slave controllers on master VSync */
+void dce110_timing_generator_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source);
+
+/* Disable trigger-reset */
+void dce110_timing_generator_disable_reset_trigger(
+ struct timing_generator *tg);
+
+/* Checks whether CRTC triggered reset occurred */
+bool dce110_timing_generator_did_triggered_reset_occur(
+ struct timing_generator *tg);
+
+/******** Stuff to move to other virtual HW objects *****************/
+/* Move to enable accelerated mode */
+void dce110_timing_generator_disable_vga(struct timing_generator *tg);
+/* TODO: Should we move it to transform */
+/* Fully program CRTC timing in timing generator */
+void dce110_timing_generator_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+
+/* TODO: Should we move it to opp? */
+/* Combine with below and move YUV/RGB color conversion to SW layer */
+void dce110_timing_generator_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color);
+/* Combine with above and move YUV/RGB color conversion to SW layer */
+void dce110_timing_generator_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color);
+void dce110_timing_generator_color_space_to_black_color(
+ enum dc_color_space colorspace,
+ struct tg_color *black_color);
+/*************** End-of-move ********************/
+
+/* Not called yet */
+void dce110_timing_generator_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth);
+
+void dce110_timing_generator_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params);
+
+void dce110_timing_generator_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value);
+
+void dce110_timing_generator_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+
+void dce110_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing);
+
+void dce110_timing_generator_set_lock_master(struct timing_generator *tg,
+ bool lock);
+
+void dce110_tg_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color);
+
+void dce110_tg_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color);
+
+void dce110_tg_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios);
+
+bool dce110_tg_is_blanked(struct timing_generator *tg);
+
+void dce110_tg_set_blank(struct timing_generator *tg,
+ bool enable_blanking);
+
+bool dce110_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+
+void dce110_tg_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state);
+
+void dce110_tg_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color);
+
+bool dce110_arm_vert_intr(
+ struct timing_generator *tg, uint8_t width);
+
+#endif /* __DC_TIMING_GENERATOR_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
new file mode 100644
index 000000000000..07d9303d5477
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
@@ -0,0 +1,688 @@
+#include "dm_services.h"
+
+/* include DCE11 register header files */
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "dc_types.h"
+#include "dc_bios_types.h"
+#include "dc.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "dce110_timing_generator.h"
+#include "dce110_timing_generator_v.h"
+
+#include "timing_generator.h"
+
+/** ********************************************************************************
+ *
+ * DCE11 Timing Generator Implementation
+ *
+ **********************************************************************************/
+
+/**
+* Enable CRTCV
+*/
+
+static bool dce110_timing_generator_v_enable_crtc(struct timing_generator *tg)
+{
+/*
+* Set MASTER_UPDATE_MODE to 0
+* This is needed for DRR, and was also suggested as the default value by Syed.
+*/
+
+ uint32_t value;
+
+ value = 0;
+ set_reg_field_value(value, 0,
+ CRTCV_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE);
+ dm_write_reg(tg->ctx,
+ mmCRTCV_MASTER_UPDATE_MODE, value);
+
+ /* TODO: may want this on for looking for underflow */
+ value = 0;
+ dm_write_reg(tg->ctx, mmCRTCV_MASTER_UPDATE_MODE, value);
+
+ value = 0;
+ set_reg_field_value(value, 1,
+ CRTCV_MASTER_EN, CRTC_MASTER_EN);
+ dm_write_reg(tg->ctx,
+ mmCRTCV_MASTER_EN, value);
+
+ return true;
+}
+
+static bool dce110_timing_generator_v_disable_crtc(struct timing_generator *tg)
+{
+ uint32_t value;
+
+ value = dm_read_reg(tg->ctx,
+ mmCRTCV_CONTROL);
+ set_reg_field_value(value, 0,
+ CRTCV_CONTROL, CRTC_DISABLE_POINT_CNTL);
+ set_reg_field_value(value, 0,
+ CRTCV_CONTROL, CRTC_MASTER_EN);
+ dm_write_reg(tg->ctx,
+ mmCRTCV_CONTROL, value);
+ /*
+ * TODO: call this when adding stereo support
+ * tg->funcs->disable_stereo(tg);
+ */
+ return true;
+}
+
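+/* Blank the underlay CRTC by forcing blank data; the unblank routine
+ * below clears CRTC_BLANK_DATA_EN again. */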
+static void dce110_timing_generator_v_blank_crtc(struct timing_generator *tg)
+{
+ uint32_t addr = mmCRTCV_BLANK_CONTROL;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DE_MODE);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_unblank_crtc(struct timing_generator *tg)
+{
+ uint32_t addr = mmCRTCV_BLANK_CONTROL;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_BLANK_CONTROL,
+ CRTC_BLANK_DE_MODE);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static bool dce110_timing_generator_v_is_in_vertical_blank(
+ struct timing_generator *tg)
+{
+ uint32_t addr = 0;
+ uint32_t value = 0;
+ uint32_t field = 0;
+
+ addr = mmCRTCV_STATUS;
+ value = dm_read_reg(tg->ctx, addr);
+ field = get_reg_field_value(value, CRTCV_STATUS, CRTC_V_BLANK);
+ return field == 1;
+}
+
+static bool dce110_timing_generator_v_is_counter_moving(struct timing_generator *tg)
+{
+ uint32_t value;
+ uint32_t h1 = 0;
+ uint32_t h2 = 0;
+ uint32_t v1 = 0;
+ uint32_t v2 = 0;
+
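+	/* Read the CRTCV position twice; any change between the two reads
+	 * means the counter is running. */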
+ value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);
+
+ h1 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ v1 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ value = dm_read_reg(tg->ctx, mmCRTCV_STATUS_POSITION);
+
+ h2 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ v2 = get_reg_field_value(
+ value,
+ CRTCV_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ if (h1 == h2 && v1 == v2)
+ return false;
+ else
+ return true;
+}
+
+static void dce110_timing_generator_v_wait_for_vblank(struct timing_generator *tg)
+{
+	/* We want to catch the beginning of VBlank here, so if we are already
+	 * in VBlank on the first check we might be very close to VActive;
+	 * in that case, wait for another frame
+ */
+ while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_v_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+
+ while (!dce110_timing_generator_v_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_v_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/**
+* Wait till we are in VActive (anywhere in VActive)
+*/
+static void dce110_timing_generator_v_wait_for_vactive(struct timing_generator *tg)
+{
+ while (dce110_timing_generator_v_is_in_vertical_blank(tg)) {
+ if (!dce110_timing_generator_v_is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+static void dce110_timing_generator_v_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ dce110_timing_generator_v_wait_for_vblank(tg);
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ dce110_timing_generator_v_wait_for_vactive(tg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void dce110_timing_generator_v_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t vsync_offset = timing->v_border_bottom +
+ timing->v_front_porch;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr = 0;
+ uint32_t tmp = 0;
+
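+	/* Program the CRTCV timing registers from the generic dc_crtc_timing:
+	 * totals, blank start/end, sync width and polarity, and interlace. */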
+ addr = mmCRTCV_H_TOTAL;
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->h_total - 1,
+ CRTCV_H_TOTAL,
+ CRTC_H_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_TOTAL;
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->v_total - 1,
+ CRTCV_V_TOTAL,
+ CRTC_V_TOTAL);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_H_BLANK_START_END;
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->h_total -
+ (h_sync_start + timing->h_border_left);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_H_BLANK_START_END,
+ CRTC_H_BLANK_END);
+
+ tmp = tmp + timing->h_addressable +
+ timing->h_border_left + timing->h_border_right;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_H_BLANK_START_END,
+ CRTC_H_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_BLANK_START_END;
+ value = dm_read_reg(ctx, addr);
+
+ tmp = timing->v_total - (v_sync_start + timing->v_border_top);
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ tmp = tmp + timing->v_addressable + timing->v_border_top +
+ timing->v_border_bottom;
+
+ set_reg_field_value(
+ value,
+ tmp,
+ CRTCV_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_H_SYNC_A;
+ value = 0;
+ set_reg_field_value(
+ value,
+ timing->h_sync_width,
+ CRTCV_H_SYNC_A,
+ CRTC_H_SYNC_A_END);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_H_SYNC_A_CNTL;
+ value = dm_read_reg(ctx, addr);
+ if (timing->flags.HSYNC_POSITIVE_POLARITY) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_H_SYNC_A_CNTL,
+ CRTC_H_SYNC_A_POL);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_H_SYNC_A_CNTL,
+ CRTC_H_SYNC_A_POL);
+ }
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_SYNC_A;
+ value = 0;
+ set_reg_field_value(
+ value,
+ timing->v_sync_width,
+ CRTCV_V_SYNC_A,
+ CRTC_V_SYNC_A_END);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_V_SYNC_A_CNTL;
+ value = dm_read_reg(ctx, addr);
+ if (timing->flags.VSYNC_POSITIVE_POLARITY) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL);
+ }
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmCRTCV_INTERLACE_CONTROL;
+ value = dm_read_reg(ctx, addr);
+ set_reg_field_value(
+ value,
+ timing->flags.INTERLACE,
+ CRTCV_INTERLACE_CONTROL,
+ CRTC_INTERLACE_ENABLE);
+ dm_write_reg(ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t addr = mmCRTCV_START_LINE_CONTROL;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (enable) {
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ }
+ set_reg_field_value(
+ value,
+ 0,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 2,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTCV_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ if (enable_blanking)
+ dce110_timing_generator_v_blank_crtc(tg);
+ else
+ dce110_timing_generator_v_unblank_crtc(tg);
+}
+
+static void dce110_timing_generator_v_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (use_vbios)
+ dce110_timing_generator_program_timing_generator(tg, timing);
+ else
+ dce110_timing_generator_v_program_blanking(tg, timing);
+}
+
+static void dce110_timing_generator_v_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ uint32_t addr = mmCRTCV_BLACK_COLOR;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t addr;
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ color->color_b_cb,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ color->color_r_cr,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ set_reg_field_value(
+ value,
+ color->color_g_y,
+ CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ addr = mmCRTCV_OVERSCAN_COLOR;
+ dm_write_reg(ctx, addr, value);
+ addr = mmCRTCV_BLACK_COLOR;
+ dm_write_reg(ctx, addr, value);
+ /* This is desirable to have a constant DAC output voltage during the
+ * blank time that is higher than the 0 volt reference level that the
+ * DAC outputs when the NBLANK signal
+ * is asserted low, such as for output to an analog TV. */
+ addr = mmCRTCV_BLANK_DATA_COLOR;
+ dm_write_reg(ctx, addr, value);
+
+	/* TODO: we have to program the EXT registers, and we need to know the LB
+	 * DATA format because it is used with more than 10, i.e. 12, bits per color
+ *
+ * m_mmDxCRTC_OVERSCAN_COLOR_EXT
+ * m_mmDxCRTC_BLACK_COLOR_EXT
+ * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
+ */
+}
+
+static void dce110_tg_v_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ uint32_t addr = mmCRTCV_BLACK_COLOR;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ black_color->color_b_cb,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB);
+ set_reg_field_value(
+ value,
+ black_color->color_g_y,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_G_Y);
+ set_reg_field_value(
+ value,
+ black_color->color_r_cr,
+ CRTCV_BLACK_COLOR,
+ CRTC_BLACK_COLOR_R_CR);
+
+ dm_write_reg(tg->ctx, addr, value);
+
+ addr = mmCRTCV_BLANK_DATA_COLOR;
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value = 0;
+ uint32_t addr;
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_b_cb,
+ CRTCV_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_g_y,
+ CRTCV_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_GREEN);
+
+ set_reg_field_value(
+ value,
+ overscan_color->color_r_cr,
+ CRTCV_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_RED);
+
+ addr = mmCRTCV_OVERSCAN_COLOR;
+ dm_write_reg(ctx, addr, value);
+}
+
+static void dce110_timing_generator_v_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color)
+{
+ if (blank_color != NULL)
+ dce110_tg_v_program_blank_color(tg, blank_color);
+ if (overscan_color != NULL)
+ dce110_timing_generator_v_set_overscan_color(tg, overscan_color);
+}
+
+static void dce110_timing_generator_v_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ uint32_t regval;
+ uint32_t address = mmCRTC_CONTROL;
+
+ regval = dm_read_reg(tg->ctx, address);
+ set_reg_field_value(regval, early_cntl,
+ CRTCV_CONTROL, CRTC_HBLANK_EARLY_CONTROL);
+ dm_write_reg(tg->ctx, address, regval);
+}
+
+static uint32_t dce110_timing_generator_v_get_vblank_counter(struct timing_generator *tg)
+{
+ uint32_t addr = mmCRTCV_STATUS_FRAME_COUNT;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+ uint32_t field = get_reg_field_value(
+ value, CRTCV_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
+
+ return field;
+}
+
+static bool dce110_timing_generator_v_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return false;
+}
+
+static void dce110_timing_generator_v_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source_tg_inst)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_disable_reset_trigger(
+ struct timing_generator *tg)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_tear_down_global_swap_lock(
+ struct timing_generator *tg)
+{
+ dm_logger_write(tg->ctx->logger, LOG_ERROR,
+ "Timing Sync not supported on underlay pipe\n");
+ return;
+}
+
+static void dce110_timing_generator_v_disable_vga(
+ struct timing_generator *tg)
+{
+ return;
+}
+
+static bool dce110_tg_v_is_blanked(struct timing_generator *tg)
+{
+ /* Signal comes from the primary pipe, underlay is never blanked. */
+ return false;
+}
+
+/** ********************************************************************************************
+ *
+ * DCE11 Timing Generator Constructor / Destructor
+ *
+ *********************************************************************************************/
+static const struct timing_generator_funcs dce110_tg_v_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = dce110_timing_generator_v_program_timing,
+ .enable_crtc = dce110_timing_generator_v_enable_crtc,
+ .disable_crtc = dce110_timing_generator_v_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_v_is_counter_moving,
+ .get_position = NULL, /* Not to be implemented for underlay*/
+ .get_frame_count = dce110_timing_generator_v_get_vblank_counter,
+ .set_early_control = dce110_timing_generator_v_set_early_control,
+ .wait_for_state = dce110_timing_generator_v_wait_for_state,
+ .set_blank = dce110_timing_generator_v_set_blank,
+ .is_blanked = dce110_tg_v_is_blanked,
+ .set_colors = dce110_timing_generator_v_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_v_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_v_program_blank_color,
+ .disable_vga = dce110_timing_generator_v_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_v_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_v_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_v_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_v_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_v_tear_down_global_swap_lock,
+ .enable_advanced_request =
+ dce110_timing_generator_v_enable_advanced_request
+};
+
+void dce110_timing_generator_v_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx)
+{
+ tg110->controller_id = CONTROLLER_ID_UNDERLAY0;
+
+ tg110->base.funcs = &dce110_tg_v_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h
new file mode 100644
index 000000000000..d2623a5994e8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_V_DCE110_H__
+#define __DC_TIMING_GENERATOR_V_DCE110_H__
+
+void dce110_timing_generator_v_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx);
+
+#endif /* __DC_TIMING_GENERATOR_V_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
new file mode 100644
index 000000000000..47390dc58306
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
@@ -0,0 +1,716 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110_transform_v.h"
+#include "dm_services.h"
+#include "dc.h"
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define SCLV_PHASES 64
+
+struct sclv_ratios_inits {
+ uint32_t h_int_scale_ratio_luma;
+ uint32_t h_int_scale_ratio_chroma;
+ uint32_t v_int_scale_ratio_luma;
+ uint32_t v_int_scale_ratio_chroma;
+ struct init_int_and_frac h_init_luma;
+ struct init_int_and_frac h_init_chroma;
+ struct init_int_and_frac v_init_luma;
+ struct init_int_and_frac v_init_chroma;
+};
+
+static void calculate_viewport(
+ const struct scaler_data *scl_data,
+ struct rect *luma_viewport,
+ struct rect *chroma_viewport)
+{
+	/* Do not set the chroma viewport for the RGB444 pixel format */
+ luma_viewport->x = scl_data->viewport.x - scl_data->viewport.x % 2;
+ luma_viewport->y = scl_data->viewport.y - scl_data->viewport.y % 2;
+ luma_viewport->width =
+ scl_data->viewport.width - scl_data->viewport.width % 2;
+ luma_viewport->height =
+ scl_data->viewport.height - scl_data->viewport.height % 2;
+ chroma_viewport->x = luma_viewport->x;
+ chroma_viewport->y = luma_viewport->y;
+ chroma_viewport->height = luma_viewport->height;
+ chroma_viewport->width = luma_viewport->width;
+
+ if (scl_data->format == PIXEL_FORMAT_420BPP8) {
+ luma_viewport->height += luma_viewport->height % 2;
+ luma_viewport->width += luma_viewport->width % 2;
+ /*for 420 video chroma is 1/4 the area of luma, scaled
+ *vertically and horizontally
+ */
+ chroma_viewport->x = luma_viewport->x / 2;
+ chroma_viewport->y = luma_viewport->y / 2;
+ chroma_viewport->height = luma_viewport->height / 2;
+ chroma_viewport->width = luma_viewport->width / 2;
+ }
+}
+
+static void program_viewport(
+ struct dce_transform *xfm_dce,
+ struct rect *luma_view_port,
+ struct rect *chroma_view_port)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t value = 0;
+ uint32_t addr = 0;
+
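+	/* Luma and chroma viewports are programmed independently; a viewport
+	 * with zero width or height is skipped. */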
+ if (luma_view_port->width != 0 && luma_view_port->height != 0) {
+ addr = mmSCLV_VIEWPORT_START;
+ value = 0;
+ set_reg_field_value(
+ value,
+ luma_view_port->x,
+ SCLV_VIEWPORT_START,
+ VIEWPORT_X_START);
+ set_reg_field_value(
+ value,
+ luma_view_port->y,
+ SCLV_VIEWPORT_START,
+ VIEWPORT_Y_START);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VIEWPORT_SIZE;
+ value = 0;
+ set_reg_field_value(
+ value,
+ luma_view_port->height,
+ SCLV_VIEWPORT_SIZE,
+ VIEWPORT_HEIGHT);
+ set_reg_field_value(
+ value,
+ luma_view_port->width,
+ SCLV_VIEWPORT_SIZE,
+ VIEWPORT_WIDTH);
+ dm_write_reg(ctx, addr, value);
+ }
+
+ if (chroma_view_port->width != 0 && chroma_view_port->height != 0) {
+ addr = mmSCLV_VIEWPORT_START_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ chroma_view_port->x,
+ SCLV_VIEWPORT_START_C,
+ VIEWPORT_X_START_C);
+ set_reg_field_value(
+ value,
+ chroma_view_port->y,
+ SCLV_VIEWPORT_START_C,
+ VIEWPORT_Y_START_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VIEWPORT_SIZE_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ chroma_view_port->height,
+ SCLV_VIEWPORT_SIZE_C,
+ VIEWPORT_HEIGHT_C);
+ set_reg_field_value(
+ value,
+ chroma_view_port->width,
+ SCLV_VIEWPORT_SIZE_C,
+ VIEWPORT_WIDTH_C);
+ dm_write_reg(ctx, addr, value);
+ }
+}
+
+/*
+ * Function:
+ * bool setup_scaling_configuration
+ *
+ * Purpose: set up the scaling mode (bypass, RGB, YCbCr) and the number of taps
+ * Input: data
+ *
+ * Output:
+ * bool - true if scaling is needed
+ */
+static bool setup_scaling_configuration(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ bool is_scaling_needed = false;
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t value = 0;
+
+ set_reg_field_value(value, data->taps.h_taps - 1,
+ SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS);
+ set_reg_field_value(value, data->taps.v_taps - 1,
+ SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS);
+ set_reg_field_value(value, data->taps.h_taps_c - 1,
+ SCLV_TAP_CONTROL, SCL_H_NUM_OF_TAPS_C);
+ set_reg_field_value(value, data->taps.v_taps_c - 1,
+ SCLV_TAP_CONTROL, SCL_V_NUM_OF_TAPS_C);
+ dm_write_reg(ctx, mmSCLV_TAP_CONTROL, value);
+
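+	/* The scaler is enabled only when more than one tap is needed in
+	 * either direction; otherwise SCL_MODE is left in bypass. */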
+ value = 0;
+ if (data->taps.h_taps + data->taps.v_taps > 2) {
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE);
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN);
+ is_scaling_needed = true;
+ } else {
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_MODE);
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN);
+ }
+
+ if (data->taps.h_taps_c + data->taps.v_taps_c > 2) {
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_MODE_C);
+ set_reg_field_value(value, 1, SCLV_MODE, SCL_PSCL_EN_C);
+ is_scaling_needed = true;
+ } else if (data->format != PIXEL_FORMAT_420BPP8) {
+ set_reg_field_value(
+ value,
+ get_reg_field_value(value, SCLV_MODE, SCL_MODE),
+ SCLV_MODE,
+ SCL_MODE_C);
+ set_reg_field_value(
+ value,
+ get_reg_field_value(value, SCLV_MODE, SCL_PSCL_EN),
+ SCLV_MODE,
+ SCL_PSCL_EN_C);
+ } else {
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_MODE_C);
+ set_reg_field_value(value, 0, SCLV_MODE, SCL_PSCL_EN_C);
+ }
+ dm_write_reg(ctx, mmSCLV_MODE, value);
+
+ value = 0;
+ /*
+ * 0 - Replaced out of bound pixels with black pixel
+ * (or any other required color)
+ * 1 - Replaced out of bound pixels with the edge pixel
+ */
+ set_reg_field_value(value, 1, SCLV_CONTROL, SCL_BOUNDARY_MODE);
+ dm_write_reg(ctx, mmSCLV_CONTROL, value);
+
+ return is_scaling_needed;
+}
+
+/**
+* Function:
+* void program_overscan
+*
+* Purpose: Programs the overscan border
+* Input:   overscan
+*
+* Output:
+*   void
+*/
+static void program_overscan(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data)
+{
+ uint32_t overscan_left_right = 0;
+ uint32_t overscan_top_bottom = 0;
+
+ int overscan_right = data->h_active - data->recout.x - data->recout.width;
+ int overscan_bottom = data->v_active - data->recout.y - data->recout.height;
+
+ if (xfm_dce->base.ctx->dc->debug.surface_visual_confirm) {
+ overscan_bottom += 2;
+ overscan_right += 2;
+ }
+
+ if (overscan_right < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_right = 0;
+ }
+ if (overscan_bottom < 0) {
+ BREAK_TO_DEBUGGER();
+ overscan_bottom = 0;
+ }
+
+ set_reg_field_value(overscan_left_right, data->recout.x,
+ EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT);
+
+ set_reg_field_value(overscan_left_right, overscan_right,
+ EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT);
+
+ set_reg_field_value(overscan_top_bottom, data->recout.y,
+ EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP);
+
+ set_reg_field_value(overscan_top_bottom, overscan_bottom,
+ EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmSCLV_EXT_OVERSCAN_LEFT_RIGHT,
+ overscan_left_right);
+
+ dm_write_reg(xfm_dce->base.ctx,
+ mmSCLV_EXT_OVERSCAN_TOP_BOTTOM,
+ overscan_top_bottom);
+}
+
+static void set_coeff_update_complete(
+ struct dce_transform *xfm_dce)
+{
+ uint32_t value;
+
+ value = dm_read_reg(xfm_dce->base.ctx, mmSCLV_UPDATE);
+ set_reg_field_value(value, 1, SCLV_UPDATE, SCL_COEF_UPDATE_COMPLETE);
+ dm_write_reg(xfm_dce->base.ctx, mmSCLV_UPDATE, value);
+}
+
+static void program_multi_taps_filter(
+ struct dce_transform *xfm_dce,
+ int taps,
+ const uint16_t *coeffs,
+ enum ram_filter_type filter_type)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ int i, phase, pair;
+ int array_idx = 0;
+ int taps_pairs = (taps + 1) / 2;
+ int phases_to_program = SCLV_PHASES / 2 + 1;
+
+ uint32_t select = 0;
+ uint32_t power_ctl, power_ctl_off;
+
+ if (!coeffs)
+ return;
+
+ /*We need to disable power gating on coeff memory to do programming*/
+ power_ctl = dm_read_reg(ctx, mmDCFEV_MEM_PWR_CTRL);
+ power_ctl_off = power_ctl;
+ set_reg_field_value(power_ctl_off, 1, DCFEV_MEM_PWR_CTRL, SCLV_COEFF_MEM_PWR_DIS);
+ dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl_off);
+
+ /*Wait to disable gating:*/
+ for (i = 0; i < 10; i++) {
+ if (get_reg_field_value(
+ dm_read_reg(ctx, mmDCFEV_MEM_PWR_STATUS),
+ DCFEV_MEM_PWR_STATUS,
+ SCLV_COEFF_MEM_PWR_STATE) == 0)
+ break;
+
+ udelay(1);
+ }
+
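+	/* Select the coefficient RAM bank for the requested filter type,
+	 * then walk the phase/tap-pair indices and write the coefficients. */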
+ set_reg_field_value(select, filter_type, SCLV_COEF_RAM_SELECT, SCL_C_RAM_FILTER_TYPE);
+
+ for (phase = 0; phase < phases_to_program; phase++) {
+		/* We always program N/2 + 1 of the N total phases; the remaining
+		 * N/2 - 1 phases are mirrored. Phase 0 is unique, and phase N/2
+		 * is unique if N is even. */
+ set_reg_field_value(select, phase, SCLV_COEF_RAM_SELECT, SCL_C_RAM_PHASE);
+ for (pair = 0; pair < taps_pairs; pair++) {
+ uint32_t data = 0;
+
+ set_reg_field_value(select, pair,
+ SCLV_COEF_RAM_SELECT, SCL_C_RAM_TAP_PAIR_IDX);
+
+ dm_write_reg(ctx, mmSCLV_COEF_RAM_SELECT, select);
+
+ set_reg_field_value(
+ data, 1,
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_EVEN_TAP_COEF_EN);
+ set_reg_field_value(
+ data, coeffs[array_idx],
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_EVEN_TAP_COEF);
+
+ if (taps % 2 && pair == taps_pairs - 1) {
+ set_reg_field_value(
+ data, 0,
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_ODD_TAP_COEF_EN);
+ array_idx++;
+ } else {
+ set_reg_field_value(
+ data, 1,
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_ODD_TAP_COEF_EN);
+ set_reg_field_value(
+ data, coeffs[array_idx + 1],
+ SCLV_COEF_RAM_TAP_DATA,
+ SCL_C_RAM_ODD_TAP_COEF);
+
+ array_idx += 2;
+ }
+
+ dm_write_reg(ctx, mmSCLV_COEF_RAM_TAP_DATA, data);
+ }
+ }
+
+ /*We need to restore power gating on coeff memory to initial state*/
+ dm_write_reg(ctx, mmDCFEV_MEM_PWR_CTRL, power_ctl);
+}
+
+static void calculate_inits(
+ struct dce_transform *xfm_dce,
+ const struct scaler_data *data,
+ struct sclv_ratios_inits *inits,
+ struct rect *luma_viewport,
+ struct rect *chroma_viewport)
+{
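+	/* Convert the scale ratios to the scaler's fixed-point format
+	 * (u2d19 shifted to match the SCLV register layout - an assumption
+	 * based on the dal_fixed31_32_u2d19 helper used below). */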
+ inits->h_int_scale_ratio_luma =
+ dal_fixed31_32_u2d19(data->ratios.horz) << 5;
+ inits->v_int_scale_ratio_luma =
+ dal_fixed31_32_u2d19(data->ratios.vert) << 5;
+ inits->h_int_scale_ratio_chroma =
+ dal_fixed31_32_u2d19(data->ratios.horz_c) << 5;
+ inits->v_int_scale_ratio_chroma =
+ dal_fixed31_32_u2d19(data->ratios.vert_c) << 5;
+
+ inits->h_init_luma.integer = 1;
+ inits->v_init_luma.integer = 1;
+ inits->h_init_chroma.integer = 1;
+ inits->v_init_chroma.integer = 1;
+}
+
+static void program_scl_ratios_inits(
+ struct dce_transform *xfm_dce,
+ struct sclv_ratios_inits *inits)
+{
+ struct dc_context *ctx = xfm_dce->base.ctx;
+ uint32_t addr = mmSCLV_HORZ_FILTER_SCALE_RATIO;
+ uint32_t value = 0;
+
+ set_reg_field_value(
+ value,
+ inits->h_int_scale_ratio_luma,
+ SCLV_HORZ_FILTER_SCALE_RATIO,
+ SCL_H_SCALE_RATIO);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_SCALE_RATIO;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_int_scale_ratio_luma,
+ SCLV_VERT_FILTER_SCALE_RATIO,
+ SCL_V_SCALE_RATIO);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_HORZ_FILTER_SCALE_RATIO_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->h_int_scale_ratio_chroma,
+ SCLV_HORZ_FILTER_SCALE_RATIO_C,
+ SCL_H_SCALE_RATIO_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_SCALE_RATIO_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_int_scale_ratio_chroma,
+ SCLV_VERT_FILTER_SCALE_RATIO_C,
+ SCL_V_SCALE_RATIO_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_HORZ_FILTER_INIT;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->h_init_luma.fraction,
+ SCLV_HORZ_FILTER_INIT,
+ SCL_H_INIT_FRAC);
+ set_reg_field_value(
+ value,
+ inits->h_init_luma.integer,
+ SCLV_HORZ_FILTER_INIT,
+ SCL_H_INIT_INT);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_INIT;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_init_luma.fraction,
+ SCLV_VERT_FILTER_INIT,
+ SCL_V_INIT_FRAC);
+ set_reg_field_value(
+ value,
+ inits->v_init_luma.integer,
+ SCLV_VERT_FILTER_INIT,
+ SCL_V_INIT_INT);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_HORZ_FILTER_INIT_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->h_init_chroma.fraction,
+ SCLV_HORZ_FILTER_INIT_C,
+ SCL_H_INIT_FRAC_C);
+ set_reg_field_value(
+ value,
+ inits->h_init_chroma.integer,
+ SCLV_HORZ_FILTER_INIT_C,
+ SCL_H_INIT_INT_C);
+ dm_write_reg(ctx, addr, value);
+
+ addr = mmSCLV_VERT_FILTER_INIT_C;
+ value = 0;
+ set_reg_field_value(
+ value,
+ inits->v_init_chroma.fraction,
+ SCLV_VERT_FILTER_INIT_C,
+ SCL_V_INIT_FRAC_C);
+ set_reg_field_value(
+ value,
+ inits->v_init_chroma.integer,
+ SCLV_VERT_FILTER_INIT_C,
+ SCL_V_INIT_INT_C);
+ dm_write_reg(ctx, addr, value);
+}
+
+static const uint16_t *get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 4)
+ return get_filter_4tap_64p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_64p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+static bool dce110_xfmv_power_up_line_buffer(struct transform *xfm)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ uint32_t value;
+
+ value = dm_read_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL);
+
+ /* Always use all three pieces of line buffer memory */
+ set_reg_field_value(value, 0, LBV_MEMORY_CTRL, LB_MEMORY_CONFIG);
+ /* Hard-coded for DCE11: 1712 (0x6B0) entries; partitions: 720/960/1712 */
+ set_reg_field_value(value, xfm_dce->lb_memory_size, LBV_MEMORY_CTRL,
+ LB_MEMORY_SIZE);
+
+ dm_write_reg(xfm_dce->base.ctx, mmLBV_MEMORY_CTRL, value);
+
+ return true;
+}
+
+static void dce110_xfmv_set_scaler(
+ struct transform *xfm,
+ const struct scaler_data *data)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ bool is_scaling_required = false;
+ bool filter_updated = false;
+ const uint16_t *coeffs_v, *coeffs_h, *coeffs_h_c, *coeffs_v_c;
+ struct rect luma_viewport = {0};
+ struct rect chroma_viewport = {0};
+
+ dce110_xfmv_power_up_line_buffer(xfm);
+ /* 1. Calculate the viewport. Viewport programming should happen after
+ * the init calculations, as they may require an adjustment of the
+ * viewport.
+ */
+
+ calculate_viewport(data, &luma_viewport, &chroma_viewport);
+
+ /* 2. Program overscan */
+ program_overscan(xfm_dce, data);
+
+ /* 3. Program taps and configuration */
+ is_scaling_required = setup_scaling_configuration(xfm_dce, data);
+
+ if (is_scaling_required) {
+ /* 4. Calculate and program ratio, filter initialization */
+
+ struct sclv_ratios_inits inits = { 0 };
+
+ calculate_inits(
+ xfm_dce,
+ data,
+ &inits,
+ &luma_viewport,
+ &chroma_viewport);
+
+ program_scl_ratios_inits(xfm_dce, &inits);
+
+ coeffs_v = get_filter_coeffs_64p(data->taps.v_taps, data->ratios.vert);
+ coeffs_h = get_filter_coeffs_64p(data->taps.h_taps, data->ratios.horz);
+ coeffs_v_c = get_filter_coeffs_64p(data->taps.v_taps_c, data->ratios.vert_c);
+ coeffs_h_c = get_filter_coeffs_64p(data->taps.h_taps_c, data->ratios.horz_c);
+
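+ /* Reprogram the coefficient RAM only when one of the cached filter
+ * tables changed; comparing pointers is assumed to be sufficient here
+ * because the helpers above return shared, static coefficient arrays. */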
+ if (coeffs_v != xfm_dce->filter_v
+ || coeffs_v_c != xfm_dce->filter_v_c
+ || coeffs_h != xfm_dce->filter_h
+ || coeffs_h_c != xfm_dce->filter_h_c) {
+ /* 5. Program vertical filters */
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps,
+ coeffs_v,
+ FILTER_TYPE_RGB_Y_VERTICAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.v_taps_c,
+ coeffs_v_c,
+ FILTER_TYPE_CBCR_VERTICAL);
+
+ /* 6. Program horizontal filters */
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps,
+ coeffs_h,
+ FILTER_TYPE_RGB_Y_HORIZONTAL);
+ program_multi_taps_filter(
+ xfm_dce,
+ data->taps.h_taps_c,
+ coeffs_h_c,
+ FILTER_TYPE_CBCR_HORIZONTAL);
+
+ xfm_dce->filter_v = coeffs_v;
+ xfm_dce->filter_v_c = coeffs_v_c;
+ xfm_dce->filter_h = coeffs_h;
+ xfm_dce->filter_h_c = coeffs_h_c;
+ filter_updated = true;
+ }
+ }
+
+ /* 7. Program the viewport */
+ program_viewport(xfm_dce, &luma_viewport, &chroma_viewport);
+
+ /* 8. Set bit to flip to new coefficient memory */
+ if (filter_updated)
+ set_coeff_update_complete(xfm_dce);
+}
+
+static void dce110_xfmv_reset(struct transform *xfm)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+
+ xfm_dce->filter_h = NULL;
+ xfm_dce->filter_v = NULL;
+ xfm_dce->filter_h_c = NULL;
+ xfm_dce->filter_v_c = NULL;
+}
+
+static void dce110_xfmv_set_gamut_remap(
+ struct transform *xfm,
+ const struct xfm_grph_csc_adjustment *adjust)
+{
+ /* DO NOTHING */
+}
+
+static void dce110_xfmv_set_pixel_storage_depth(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params)
+{
+ struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm);
+ int pixel_depth = 0;
+ int expan_mode = 0;
+ uint32_t reg_data = 0;
+
+ switch (depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ pixel_depth = 2;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_24BPP:
+ pixel_depth = 1;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_30BPP:
+ pixel_depth = 0;
+ expan_mode = 1;
+ break;
+ case LB_PIXEL_DEPTH_36BPP:
+ pixel_depth = 3;
+ expan_mode = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ set_reg_field_value(
+ reg_data,
+ expan_mode,
+ LBV_DATA_FORMAT,
+ PIXEL_EXPAN_MODE);
+
+ set_reg_field_value(
+ reg_data,
+ pixel_depth,
+ LBV_DATA_FORMAT,
+ PIXEL_DEPTH);
+
+ dm_write_reg(xfm->ctx, mmLBV_DATA_FORMAT, reg_data);
+
+ if (!(xfm_dce->lb_pixel_depth_supported & depth)) {
+ /* We should not use unsupported capabilities
+ * unless required by a workaround (w/a) */
+ dm_logger_write(xfm->ctx->logger, LOG_WARNING,
+ "%s: Capability not supported",
+ __func__);
+ }
+}
+
+static const struct transform_funcs dce110_xfmv_funcs = {
+ .transform_reset = dce110_xfmv_reset,
+ .transform_set_scaler = dce110_xfmv_set_scaler,
+ .transform_set_gamut_remap =
+ dce110_xfmv_set_gamut_remap,
+ .opp_set_csc_default = dce110_opp_v_set_csc_default,
+ .opp_set_csc_adjustment = dce110_opp_v_set_csc_adjustment,
+ .opp_power_on_regamma_lut = dce110_opp_power_on_regamma_lut_v,
+ .opp_program_regamma_pwl = dce110_opp_program_regamma_pwl_v,
+ .opp_set_regamma_mode = dce110_opp_set_regamma_mode_v,
+ .transform_set_pixel_storage_depth =
+ dce110_xfmv_set_pixel_storage_depth,
+ .transform_get_optimal_number_of_taps =
+ dce_transform_get_optimal_number_of_taps
+};
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+bool dce110_transform_v_construct(
+ struct dce_transform *xfm_dce,
+ struct dc_context *ctx)
+{
+ xfm_dce->base.ctx = ctx;
+
+ xfm_dce->base.funcs = &dce110_xfmv_funcs;
+
+ xfm_dce->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ xfm_dce->prescaler_on = true;
+ xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h
new file mode 100644
index 000000000000..b70780210aad
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h
@@ -0,0 +1,58 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TRANSFORM_V_DCE110_H__
+#define __DAL_TRANSFORM_V_DCE110_H__
+
+#include "../dce/dce_transform.h"
+
+#define LB_TOTAL_NUMBER_OF_ENTRIES 1712
+#define LB_BITS_PER_ENTRY 144
+
+bool dce110_transform_v_construct(
+ struct dce_transform *xfm110,
+ struct dc_context *ctx);
+
+void dce110_opp_v_set_csc_default(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust);
+
+void dce110_opp_v_set_csc_adjustment(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dce110_opp_program_regamma_pwl_v(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+void dce110_opp_power_on_regamma_lut_v(
+ struct transform *xfm,
+ bool power_on);
+
+void dce110_opp_set_regamma_mode_v(
+ struct transform *xfm,
+ enum opp_regamma mode);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
new file mode 100644
index 000000000000..265ac4310d85
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the 'dce112' sub-component of DAL.
+# It provides the control and status of the DCE 11.2 specific HW blocks.
+
+DCE112 = dce112_compressor.o dce112_hw_sequencer.o \
+dce112_resource.o
+
+AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE112)
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
new file mode 100644
index 000000000000..69649928768c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
@@ -0,0 +1,854 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+
+#include "include/logger_interface.h"
+
+#include "dce112_compressor.h"
+
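+/* DCP_REG()/DMIF_REG() add the per-pipe register offsets cached in
+ * cp110->offsets, which dce112_compressor_enable_fbc() fills in from the
+ * attached pipe instance. */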
+#define DCP_REG(reg)\
+ (reg + cp110->offsets.dcp_offset)
+#define DMIF_REG(reg)\
+ (reg + cp110->offsets.dmif_offset)
+
+static const struct dce112_compressor_reg_offsets reg_offsets[] = {
+{
+ .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset =
+ (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+}
+};
+
+static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
+
+enum fbc_idle_force {
+ /* Bit 0 - Display registers updated */
+ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
+
+ /* Bit 2 - FBC_GRPH_COMP_EN register updated */
+ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
+ /* Bit 3 - FBC_SRC_SEL register updated */
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
+ /* Bit 4 - FBC_MIN_COMPRESSION register updated */
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
+ /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
+ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
+ /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
+ /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
+
+ /* Bit 24 - Memory write to region 0 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
+ /* Bit 25 - Memory write to region 1 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
+ /* Bit 26 - Memory write to region 2 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
+ /* Bit 27 - Memory write to region 3 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
+
+ /* Bit 28 - Memory write from any client other than MCIF */
+ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
+ /* Bit 29 - CG static screen signal is inactive */
+ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
+};
+
+static uint32_t lpt_size_alignment(struct dce112_compressor *cp110)
+{
+ /*LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * # DRAM CHANNELS. */
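+ /* Illustrative example (values assumed): a 2048-byte DRAM row, 8 banks
+ * and 2 DRAM channels give 2048 * 8 * 2 = 32768 bytes (32 KB) alignment. */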
+ return cp110->base.raw_size * cp110->base.banks_num *
+ cp110->base.dram_channels_num;
+}
+
+static uint32_t lpt_memory_control_config(struct dce112_compressor *cp110,
+ uint32_t lpt_control)
+{
+ /*LPT MC Config */
+ if (cp110->base.options.bits.LPT_MC_CONFIG == 1) {
+ /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
+ * 00 - 1 CHANNEL
+ * 01 - 2 CHANNELS
+ * 02 - 4 OR 6 CHANNELS
+ * (Only for discrete GPU, N/A for CZ)
+ * 03 - 8 OR 12 CHANNELS
+ * (Only for discrete GPU, N/A for CZ) */
+ switch (cp110->base.dram_channels_num) {
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ case 1:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_PIPES!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LPT NUM_BANKS is in
+ * GRPH_CONTROL.GRPH_NUM_BANKS register field
+ * Specifies the number of memory banks for tiling
+ * purposes. Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES:
+ * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
+ * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
+ * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
+ * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
+ switch (cp110->base.banks_num) {
+ case 16:
+ set_reg_field_value(
+ lpt_control,
+ 3,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 8:
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 4:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_BANKS!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping is in DMIF_ADDR_CALC.
+ * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
+ * Carrizo specifies the memory interleave per pipe.
+ * It effectively specifies the location of pipe bits in
+ * the memory address.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
+ * interleave
+ * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
+ * interleave
+ */
+ switch (cp110->base.channel_interleave_size) {
+ case 256: /*256B */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ case 512: /*512B */
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT INTERLEAVE_SIZE!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
+ * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
+ * for Carrizo. Specifies the size of dram row in bytes.
+ * This should match up with NOOFCOLS field in
+ * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2^columns).
+ * This register DMIF_ADDR_CALC is not used by the
+ * hardware as it is only used for addrlib assertions.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
+ * boundary
+ * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
+ * boundary
+ * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
+ * boundary */
+ switch (cp110->base.raw_size) {
+ case 4096: /*4 KB */
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 2048:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 1024:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT ROW_SIZE!!!",
+ __func__);
+ break;
+ }
+ } else {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: LPT MC Configuration is not provided",
+ __func__);
+ }
+
+ return lpt_control;
+}
+
+static bool is_source_bigger_than_epanel_size(
+ struct dce112_compressor *cp110,
+ uint32_t source_view_width,
+ uint32_t source_view_height)
+{
+ if (cp110->base.embedded_panel_h_size != 0 &&
+ cp110->base.embedded_panel_v_size != 0 &&
+ ((source_view_width * source_view_height) >
+ (cp110->base.embedded_panel_h_size *
+ cp110->base.embedded_panel_v_size)))
+ return true;
+
+ return false;
+}
+
+static uint32_t align_to_chunks_number_per_line(
+ struct dce112_compressor *cp110,
+ uint32_t pixels)
+{
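+ /* Round the line width up to a whole number of 256-pixel chunks.
+ * Illustrative example: 1920 pixels -> (1920 + 255) / 256 = 8 chunks,
+ * 8 * 256 = 2048 aligned pixels. */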
+ return 256 * ((pixels + 255) / 256);
+}
+
+static void wait_for_fbc_state_changed(
+ struct dce112_compressor *cp110,
+ bool enabled)
+{
+ uint8_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
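+ /* Poll up to 10 times with a 10 us delay between reads, i.e. roughly
+ * 100 us in total, before giving up and logging a warning below. */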
+ while (counter < 10) {
+ value = dm_read_reg(cp110->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+ udelay(10);
+ counter++;
+ }
+
+ if (counter == 10) {
+ dm_logger_write(
+ cp110->base.ctx->logger, LOG_WARNING,
+ "%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ }
+}
+
+void dce112_compressor_power_up_fbc(struct compressor *compressor)
+{
+ uint32_t value;
+ uint32_t addr;
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
+ set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
+ if (compressor->options.bits.CLK_GATING_DISABLED == 1) {
+ /* HW needs to do power measurement comparison. */
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CNTL,
+ FBC_COMP_CLK_GATE_EN);
+ }
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_MODE;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ /* FBC_MIN_COMPRESSION:
+ * 0 ==> 2:1
+ * 1 ==> 4:1
+ * 2 ==> 8:1
+ * 0xF ==> 1:1
+ */
+ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
+ dm_write_reg(compressor->ctx, addr, value);
+ compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
+
+ value = 0;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
+
+ value = 0xFFFFFF;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
+}
+
+void dce112_compressor_enable_fbc(
+ struct compressor *compressor,
+ uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ (compressor->options.bits.DUMMY_BACKEND == 0) &&
+ (!dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
+ (!is_source_bigger_than_epanel_size(
+ cp110,
+ params->source_view_width,
+ params->source_view_height))) {
+
+ uint32_t addr;
+ uint32_t value;
+
+ /* Before enabling FBC we first need to enable LPT if applicable.
+ * LPT state should always be changed (enabled/disabled) while FBC
+ * is disabled. */
+ if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
+ (params->source_view_width *
+ params->source_view_height <=
+ dce11_one_lpt_channel_max_resolution)) {
+ dce112_compressor_enable_lpt(compressor);
+ }
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(
+ value,
+ params->inst,
+ FBC_CNTL, FBC_SRC_SEL);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Keep track of enum controller_id FBC is attached to */
+ compressor->is_enabled = true;
+ compressor->attached_inst = params->inst;
+ cp110->offsets = reg_offsets[params->inst];
+
+ /* Toggle it, as there is a bug in HW */
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ wait_for_fbc_state_changed(cp110, true);
+ }
+}
+
+void dce112_compressor_disable_fbc(struct compressor *compressor)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ dce112_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+ /* Whenever disabling FBC, make sure LPT is also disabled if LPT
+ * is supported */
+ if (compressor->options.bits.LPT_SUPPORT)
+ dce112_compressor_disable_lpt(compressor);
+
+ wait_for_fbc_state_changed(cp110, false);
+ }
+}
+
+bool dce112_compressor_is_fbc_enabled_in_hw(
+ struct compressor *compressor,
+ uint32_t *inst)
+{
+ /* Check the hardware register */
+ uint32_t value;
+
+ value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
+ if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ value = dm_read_reg(compressor->ctx, mmFBC_MISC);
+ if (get_reg_field_value(value, FBC_MISC, FBC_STOP_ON_HFLIP_EVENT)) {
+ value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+
+ if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
+ if (inst != NULL)
+ *inst =
+ compressor->attached_inst;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
+{
+ /* Check the hardware register */
+ uint32_t value = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ return get_reg_field_value(
+ value,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+}
+
+void dce112_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t value = 0;
+ uint32_t fbc_pitch = 0;
+ uint32_t compressed_surf_address_low_part =
+ compressor->compr_surface_address.addr.low_part;
+
+ /* Clear content first. */
+ dm_write_reg(
+ compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ 0);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
+
+ if (compressor->options.bits.LPT_SUPPORT) {
+ uint32_t lpt_alignment = lpt_size_alignment(cp110);
+
+ if (lpt_alignment != 0) {
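+ /* Round the compressed surface address up to the next LPT
+ * alignment boundary. Illustrative example (values assumed):
+ * address 0x105000 with a 32768-byte (0x8000) alignment
+ * becomes 0x108000. */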
+ compressed_surf_address_low_part =
+ ((compressed_surf_address_low_part
+ + (lpt_alignment - 1)) / lpt_alignment)
+ * lpt_alignment;
+ }
+ }
+
+ /* Write address, HIGH has to be first. */
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ compressor->compr_surface_address.addr.high_part);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
+ compressed_surf_address_low_part);
+
+ fbc_pitch = align_to_chunks_number_per_line(
+ cp110,
+ params->source_view_width);
+
+ if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
+ fbc_pitch = fbc_pitch / 8;
+ else
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Unexpected DCE11 compression ratio",
+ __func__);
+
+ /* Clear content first. */
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
+
+ /* Write FBC Pitch. */
+ set_reg_field_value(
+ value,
+ fbc_pitch,
+ GRPH_COMPRESS_PITCH,
+ GRPH_COMPRESS_PITCH);
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
+
+}
+
+void dce112_compressor_disable_lpt(struct compressor *compressor)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t inx;
+
+ /* Disable all pipes LPT Stutter */
+ for (inx = 0; inx < 3; inx++) {
+ value =
+ dm_read_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 0,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
+ value);
+ }
+ /* Disable Underlay pipe LPT Stutter */
+ addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0,
+ DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Disable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Clear selection of Channel(s) containing Compressed Surface */
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0xFFFFFFFF,
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
+}
+
+void dce112_compressor_enable_lpt(struct compressor *compressor)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t value_control;
+ uint32_t channels;
+
+ /* Enable LPT Stutter from Display pipe */
+ value = dm_read_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 1,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
+
+ /* Enable Underlay pipe LPT Stutter */
+ addr = mmDPGV0_PIPE_STUTTER_CONTROL_NONLPTCH;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 1,
+ DPGV0_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Selection of Channel(s) containing Compressed Surface: 0xffffffff
+ * will disable LPT.
+ * STCTRL_LPT_TARGETn corresponds to channel n. */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value_control = dm_read_reg(compressor->ctx, addr);
+ channels = get_reg_field_value(value_control,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ channels + 1, /* not mentioned in the programming guide,
+ * but follows DCE8.1 */
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Enable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce112_compressor_program_lpt_control(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce112_compressor *cp110 = TO_DCE112_COMPRESSOR(compressor);
+ uint32_t rows_per_channel;
+ uint32_t lpt_alignment;
+ uint32_t source_view_width;
+ uint32_t source_view_height;
+ uint32_t lpt_control = 0;
+
+ if (!compressor->options.bits.LPT_SUPPORT)
+ return;
+
+ lpt_control = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ /* POSSIBLE VALUES for Low Power Tiling Mode:
+ * 00 - Use channel 0
+ * 01 - Use Channel 0 and 1
+ * 02 - Use Channel 0,1,2,3
+ * 03 - reserved */
+ switch (compressor->lpt_channels_num) {
+ /* case 2:
+ * Use Channel 0 & 1 / Not used for DCE 11 */
+ case 1:
+ /*Use Channel 0 for LPT for DCE 11 */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+ break;
+ default:
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Invalid selected DRAM channels for LPT!!!",
+ __func__);
+ break;
+ }
+
+ lpt_control = lpt_memory_control_config(cp110, lpt_control);
+
+ /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
+ * FBC compressed surface pitch.
+ * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
+ * Surface Pitch) / (Row Size * Number of Channels *
+ * Number of Banks)). */
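+ /* Illustrative example (all values assumed): an aligned width of 2048
+ * pixels, a height of 770 lines and 4 bytes per pixel give
+ * 2048 * 770 * 4 = 6307840 bytes; with an LPT alignment of 32768 bytes
+ * this is 6307840 / 32768 = 192.5, rounded up to 193 rows per channel. */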
+ rows_per_channel = 0;
+ lpt_alignment = lpt_size_alignment(cp110);
+ source_view_width =
+ align_to_chunks_number_per_line(
+ cp110,
+ params->source_view_width);
+ source_view_height = (params->source_view_height + 1) & (~0x1);
+
+ if (lpt_alignment != 0) {
+ rows_per_channel = source_view_width * source_view_height * 4;
+ rows_per_channel =
+ (rows_per_channel % lpt_alignment) ?
+ (rows_per_channel / lpt_alignment + 1) :
+ rows_per_channel / lpt_alignment;
+ }
+
+ set_reg_field_value(
+ lpt_control,
+ rows_per_channel,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROWS_PER_CHAN);
+
+ dm_write_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL, lpt_control);
+}
+
+/*
+ * DCE 11 Frame Buffer Compression Implementation
+ */
+
+void dce112_compressor_set_fbc_invalidation_triggers(
+ struct compressor *compressor,
+ uint32_t fbc_trigger)
+{
+ /* Disable the region hit event: FBC_MEMORY_REGION_MASK = 0 (bits 16-19).
+ * For DCE 11, regions cannot be used - they do not work with S/G.
+ */
+ uint32_t addr = mmFBC_CLIENT_REGION_MASK;
+ uint32_t value = dm_read_reg(compressor->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CLIENT_REGION_MASK,
+ FBC_MEMORY_REGION_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Setup events when to clear all CSM entries (effectively marking
+ * current compressed data invalid)
+ * For DCE 11 CSM metadata 11111 means - "Not Compressed"
+ * Used as the initial value of the metadata sent to the compressor
+ * after invalidation, to indicate that the compressor should attempt
+ * to compress all chunks on the current pass. Also used when the chunk
+ * is not successfully written to memory.
+ * When this CSM value is detected, FBC reads from the uncompressed
+ * buffer. Set events according to passed in value, these events are
+ * valid for DCE11:
+ * - bit 0 - display register updated
+ * - bit 28 - memory write from any client except from MCIF
+ * - bit 29 - CG static screen signal is inactive
+ * In addition, DCE11.1 also needs to set new DCE11.1 specific events
+ * that are used to trigger invalidation on certain register changes,
+ * for example enabling Alpha Compression may trigger invalidation of
+ * FBC once the bit is set. These events are as follows:
+ * - Bit 2 - FBC_GRPH_COMP_EN register updated
+ * - Bit 3 - FBC_SRC_SEL register updated
+ * - Bit 4 - FBC_MIN_COMPRESSION register updated
+ * - Bit 5 - FBC_ALPHA_COMP_EN register updated
+ * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
+ * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
+ */
+ addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ fbc_trigger |
+ FBC_IDLE_FORCE_GRPH_COMP_EN |
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE |
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
+ FBC_IDLE_FORCE_ALPHA_COMP_EN |
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ FBC_IDLE_FORCE_CLEAR_MASK,
+ FBC_IDLE_FORCE_CLEAR_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce112_compressor_construct(struct dce112_compressor *compressor,
+ struct dc_context *ctx)
+{
+ struct dc_bios *bp = ctx->dc_bios;
+ struct embedded_panel_info panel_info;
+
+ compressor->base.options.raw = 0;
+ compressor->base.options.bits.FBC_SUPPORT = true;
+ compressor->base.options.bits.LPT_SUPPORT = true;
+ /* For DCE 11 always use one DRAM channel for LPT */
+ compressor->base.lpt_channels_num = 1;
+ compressor->base.options.bits.DUMMY_BACKEND = false;
+
+ compressor->base.options.bits.CLK_GATING_DISABLED = false;
+
+ compressor->base.ctx = ctx;
+ compressor->base.embedded_panel_h_size = 0;
+ compressor->base.embedded_panel_v_size = 0;
+ compressor->base.memory_bus_width = ctx->asic_id.vram_width;
+ compressor->base.allocated_size = 0;
+ compressor->base.preferred_requested_size = 0;
+ compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
+ compressor->base.banks_num = 0;
+ compressor->base.raw_size = 0;
+ compressor->base.channel_interleave_size = 0;
+ compressor->base.dram_channels_num = 0;
+ compressor->base.attached_inst = 0;
+ compressor->base.is_enabled = false;
+
+ /* Check if this system has more than 1 DRAM channel; if only 1 then LPT
+ * should not be supported. The check has to come after memory_bus_width
+ * has been read from the ASIC id above. */
+ if (compressor->base.memory_bus_width == 64)
+ compressor->base.options.bits.LPT_SUPPORT = false;
+
+ if (BP_RESULT_OK ==
+ bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
+ compressor->base.embedded_panel_h_size =
+ panel_info.lcd_timing.horizontal_addressable;
+ compressor->base.embedded_panel_v_size =
+ panel_info.lcd_timing.vertical_addressable;
+ }
+}
+
+struct compressor *dce112_compressor_create(struct dc_context *ctx)
+{
+ struct dce112_compressor *cp110 =
+ kzalloc(sizeof(struct dce112_compressor), GFP_KERNEL);
+
+ if (!cp110)
+ return NULL;
+
+ dce112_compressor_construct(cp110, ctx);
+ return &cp110->base;
+}
+
+void dce112_compressor_destroy(struct compressor **compressor)
+{
+ kfree(TO_DCE112_COMPRESSOR(*compressor));
+ *compressor = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h
new file mode 100644
index 000000000000..f1227133f6df
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h
@@ -0,0 +1,78 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMPRESSOR_DCE112_H__
+#define __DC_COMPRESSOR_DCE112_H__
+
+#include "../inc/compressor.h"
+
+#define TO_DCE112_COMPRESSOR(compressor)\
+ container_of(compressor, struct dce112_compressor, base)
+
+struct dce112_compressor_reg_offsets {
+ uint32_t dcp_offset;
+ uint32_t dmif_offset;
+};
+
+struct dce112_compressor {
+ struct compressor base;
+ struct dce112_compressor_reg_offsets offsets;
+};
+
+struct compressor *dce112_compressor_create(struct dc_context *ctx);
+
+void dce112_compressor_construct(struct dce112_compressor *cp110,
+ struct dc_context *ctx);
+
+void dce112_compressor_destroy(struct compressor **cp);
+
+/* FBC RELATED */
+void dce112_compressor_power_up_fbc(struct compressor *cp);
+
+void dce112_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params);
+
+void dce112_compressor_disable_fbc(struct compressor *cp);
+
+void dce112_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
+ uint32_t fbc_trigger);
+
+void dce112_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce112_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+
+/* LPT RELATED */
+void dce112_compressor_enable_lpt(struct compressor *cp);
+
+void dce112_compressor_disable_lpt(struct compressor *cp);
+
+void dce112_compressor_program_lpt_control(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce112_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
new file mode 100644
index 000000000000..1e4a7c13f0ed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce112_hw_sequencer.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+
+/* include DCE11.2 register header files */
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+struct dce112_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce112_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
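+
+/* HW_REG_CRTC() resolves a CRTC register for a given controller instance by
+ * adding the per-CRTC offset from reg_offsets[] above. */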
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+static void dce112_init_pte(struct dc_context *ctx)
+{
+ uint32_t addr;
+ uint32_t value = 0;
+ uint32_t chunk_int = 0;
+ uint32_t chunk_mul = 0;
+
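+ /* Reprogram the DVMM PTE request pacing only if it is not already at
+ * the expected 4 (integer) / 4 (multiplier) setting; MAX_PTEREQ_TO_ISSUE
+ * is assumed to be set to 255 simply to avoid limiting the number of
+ * outstanding PTE requests. */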
+ addr = mmDVMM_PTE_REQ;
+ value = dm_read_reg(ctx, addr);
+
+ chunk_int = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ chunk_mul = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ if (chunk_int != 0x4 || chunk_mul != 0x4) {
+
+ set_reg_field_value(
+ value,
+ 255,
+ DVMM_PTE_REQ,
+ MAX_PTEREQ_TO_ISSUE);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ dm_write_reg(ctx, addr, value);
+ }
+}
+
+static bool dce112_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ return true;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
+
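+ /* The VBIOS power gating call is assumed to use 1-based controller
+ * numbering, hence controller_id + 1. */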
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
+ * by default when command table is called
+ */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmCRTC_MASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (power_gating != PIPE_GATING_CONTROL_ENABLE)
+ dce112_init_pte(ctx);
+
+ return bp_result == BP_RESULT_OK;
+}
+
+void dce112_hw_sequencer_construct(struct dc *dc)
+{
+ /* All registers used by dce11.2 match those in dce11 in offset and
+ * structure
+ */
+ dce110_hw_sequencer_construct(dc);
+ dc->hwss.enable_display_power_gating = dce112_enable_display_power_gating;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
new file mode 100644
index 000000000000..e646f4a37fa2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_hw_sequencer.h
@@ -0,0 +1,36 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE112_H__
+#define __DC_HWSS_DCE112_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dce112_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE112_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
new file mode 100644
index 000000000000..663e0a047a4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -0,0 +1,1283 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dce110/dce110_resource.h"
+#include "dce110/dce110_timing_generator.h"
+
+#include "irq/dce110/irq_service_dce110.h"
+
+#include "dce/dce_mem_input.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+
+#include "dce/dce_hwseq.h"
+#include "dce112/dce112_hw_sequencer.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#include "reg_helper.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+#include "dce100/dce100_resource.h"
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+ #define mmDP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x4aa7
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x4ba7
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x4ca7
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x4da7
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x4ea7
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4fa7
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x54a7
+ #define mmDP7_DP_DPHY_INTERNAL_CTRL 0x56a7
+ #define mmDP8_DP_DPHY_INTERNAL_CTRL 0x57a7
+#endif
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_BS_SR_SWAP_CNTL
+ #define mmDP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x4ADC
+ #define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x4BDC
+ #define mmDP2_DP_DPHY_BS_SR_SWAP_CNTL 0x4CDC
+ #define mmDP3_DP_DPHY_BS_SR_SWAP_CNTL 0x4DDC
+ #define mmDP4_DP_DPHY_BS_SR_SWAP_CNTL 0x4EDC
+ #define mmDP5_DP_DPHY_BS_SR_SWAP_CNTL 0x4FDC
+ #define mmDP6_DP_DPHY_BS_SR_SWAP_CNTL 0x54DC
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x4ABC
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x4BBC
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x4CBC
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x4DBC
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x4EBC
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4FBC
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x54BC
+#endif
+
+enum dce112_clk_src_array_id {
+ DCE112_CLK_SRC_PLL0,
+ DCE112_CLK_SRC_PLL1,
+ DCE112_CLK_SRC_PLL2,
+ DCE112_CLK_SRC_PLL3,
+ DCE112_CLK_SRC_PLL4,
+ DCE112_CLK_SRC_PLL5,
+
+ DCE112_CLK_SRC_TOTAL
+};
+
+static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE100_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE110_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE112(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE112(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_112_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5)
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(index, id)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCE_112(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E),
+ clk_src_regs(5, F)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps polaris_10_resource_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
+};
+
+static const struct resource_caps polaris_11_resource_cap = {
+ .num_timing_generator = 5,
+ .num_audio = 5,
+ .num_stream_encoder = 5,
+ .num_pll = 8, /* why 8? 6 combo PHY PLL + 2 regular PLLs? */
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x4819
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce110_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct stream_encoder *dce112_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
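+/* set indexed register offset with instance */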
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE112_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE112_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE112_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce112_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce112_stream_encoder_create,
+ .create_hwseq = dce112_hwseq_create,
+};
+
+#define mi_inst_regs(id) { MI_DCE11_2_REG_LIST(id) }
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE11_2_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE11_2_MASK_SH_LIST(_MASK)
+};
+
+static struct mem_input *dce112_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ return &dce_mi->base;
+}
+
+static void dce112_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce112_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->lb_memory_size = 0x1404; /*5124*/
+ return &transform->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dce112_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+static struct input_pixel_processor *dce112_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+struct output_pixel_processor *dce112_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct clock_source *dce112_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce112_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce112_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce112_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce112_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
+static struct clock_source *find_matching_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct dc_stream_state *const stream)
+{
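+ /* UNIPHY transmitters A..F map 1:1 onto combo PHY PLL0..PLL5 */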
+ switch (stream->sink->link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL0];
+ case TRANSMITTER_UNIPHY_B:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL1];
+ case TRANSMITTER_UNIPHY_C:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL2];
+ case TRANSMITTER_UNIPHY_D:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL3];
+ case TRANSMITTER_UNIPHY_E:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL4];
+ case TRANSMITTER_UNIPHY_F:
+ return pool->clock_sources[DCE112_CLK_SRC_PLL5];
+ default:
+ return NULL;
+ }
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+bool dce112_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ bool result = false;
+
+ dm_logger_write(
+ dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "%s: start",
+ __func__);
+
+ if (bw_calcs(
+ dc->ctx,
+ dc->bw_dceip,
+ dc->bw_vbios,
+ context->res_ctx.pipe_ctx,
+ dc->res_pool->pipe_count,
+ &context->bw.dce))
+ result = true;
+
+ if (!result)
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_VALIDATION,
+ "%s: Bandwidth validation failed!",
+ __func__);
+
+ if (memcmp(&dc->current_state->bw.dce,
+ &context->bw.dce, sizeof(context->bw.dce))) {
+ struct log_entry log_entry;
+ dm_logger_open(
+ dc->ctx->logger,
+ &log_entry,
+ LOG_BANDWIDTH_CALCS);
+ dm_logger_append(&log_entry, "%s: finish,\n"
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ __func__,
+ context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[0].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[1].a_mark);
+ dm_logger_append(&log_entry,
+ "nbpMark_b: %d nbpMark_a: %d urgentMark_b: %d urgentMark_a: %d\n"
+ "stutMark_b: %d stutMark_a: %d stutter_mode_enable: %d\n",
+ context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw.dce.stutter_mode_enable);
+ dm_logger_append(&log_entry,
+ "cstate: %d pstate: %d nbpstate: %d sync: %d dispclk: %d\n"
+ "sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n",
+ context->bw.dce.cpuc_state_change_enable,
+ context->bw.dce.cpup_state_change_enable,
+ context->bw.dce.nbp_state_change_enable,
+ context->bw.dce.all_displays_in_sync,
+ context->bw.dce.dispclk_khz,
+ context->bw.dce.sclk_khz,
+ context->bw.dce.sclk_deep_sleep_khz,
+ context->bw.dce.yclk_khz,
+ context->bw.dce.blackout_recovery_time_us);
+ dm_logger_close(&log_entry);
+ }
+ return result;
+}
+
+enum dc_status resource_map_phy_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ /* acquire new resources */
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(
+ &context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)
+ || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ pipe_ctx->clock_source =
+ dc->res_pool->dp_clock_source;
+ else
+ pipe_ctx->clock_source = find_matching_pll(
+ &context->res_ctx, dc->res_pool,
+ stream);
+
+ if (pipe_ctx->clock_source == NULL)
+ return DC_NO_CLOCK_SOURCE_RESOURCE;
+
+ resource_reference_clock_source(
+ &context->res_ctx,
+ dc->res_pool,
+ pipe_ctx->clock_source);
+
+ return DC_OK;
+}
+
+static bool dce112_validate_surface_sets(
+ struct dc_state *context)
+{
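+ /* Sketch of the constraint enforced below: each stream may carry at most
+ * one plane, and that plane must use a non-video surface format,
+ * presumably because this pool registers no underlay pipe (construct()
+ * sets underlay_pipe_index to NO_UNDERLAY_PIPE).
+ */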
+ int i;
+
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce112_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
+
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+enum dc_status dce112_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, context, stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ if (!dce112_validate_bandwidth(dc, context))
+ result = DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+enum dc_status dce112_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce112_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static void dce112_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce112_res_pool_funcs = {
+ .destroy = dce112_destroy_resource_pool,
+ .link_enc_create = dce112_link_encoder_create,
+ .validate_guaranteed = dce112_validate_guaranteed,
+ .validate_bandwidth = dce112_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce112_add_stream_to_ctx,
+ .validate_global = dce112_validate_global
+};
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+ struct dm_pp_clock_levels_with_latency eng_clks = {0};
+ struct dm_pp_clock_levels_with_latency mem_clks = {0};
+ struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
+ struct dm_pp_clock_levels clks = {0};
+
+ /* Do the system (engine) clock.
+ * TODO PPLIB: once the PPLIB path is implemented, remove the old way.
+ */
+ if (!dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &eng_clks)) {
+
+ /* This is only temporary */
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &clks);
+ /* convert all clocks from kHz to fixed-point MHz */
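+ /* e.g. bw_frc_to_fixed(300000, 1000) yields 300.0 MHz as a bw fixed-point value */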
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1], 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels/8], 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*2/8], 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*3/8], 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*4/8], 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*5/8], 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels*6/8], 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0], 1000);
+
+ /*do memory clock*/
+ dm_pp_get_clock_levels_by_type(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &clks);
+
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+
+ return;
+ }
+
+ /* convert all clocks from kHz to fixed-point MHz TODO: wloop data */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ eng_clks.data[0].clocks_in_khz, 1000);
+
+ /*do memory clock*/
+ dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &mem_clks);
+
+ /* We don't need a separate PPLIB call for the validation clocks, since
+ * these tables already give us the highest sclk and highest mclk (UMA
+ * clock). Also always convert the UMA clock reported by PPLIB to YCLK
+ * using the HW formula:
+ * YCLK = UMACLK * m_memoryTypeMultiplier
+ */
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+
+ /* Now notify PPLib/SMU about which Watermarks sets they should select
+ * depending on DPM state they are in. And update BW MGR GFX Engine and
+ * Memory clock member variables for Watermarks calculations for each
+ * Watermark Set
+ */
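+ /* Rough sketch of the four watermark sets programmed below, derived from
+ * the assignments that follow ("3/8 sclk" is the engine clock at index
+ * num_levels*3/8, "mid mclk" is the level at num_levels/2, and 5 GHz is
+ * used as an upper bound to cover Overdrive):
+ *
+ *   set  engine clock range      memory clock range
+ *   A    [lowest, 3/8 sclk)      [lowest, mid mclk)
+ *   B    [3/8 sclk, 5 GHz]       [lowest, mid mclk)
+ *   C    [lowest, 3/8 sclk)      [mid mclk, 5 GHz]
+ *   D    [3/8 sclk, 5 GHz]       [mid mclk, 5 GHz]
+ */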
+ clk_ranges.num_wm_sets = 4;
+ clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
+ clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B;
+ clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C;
+ clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
+
+ clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D;
+ clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
+}
+
+const struct resource_caps *dce112_resource_cap(
+ struct hw_asic_id *asic_id)
+{
+ if (ASIC_REV_IS_POLARIS11_M(asic_id->hw_internal_rev) ||
+ ASIC_REV_IS_POLARIS12_V(asic_id->hw_internal_rev))
+ return &polaris_11_resource_cap;
+ else
+ return &polaris_10_resource_cap;
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = dce112_resource_cap(&ctx->asic_id);
+ pool->base.funcs = &dce112_res_pool_funcs;
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL0] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL1] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL2] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL3] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL4] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+ pool->base.clock_sources[DCE112_CLK_SRC_PLL5] =
+ dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ &clk_src_regs[5], false);
+ pool->base.clk_src_count = DCE112_CLK_SRC_TOTAL;
+
+ pool->base.dp_clock_source = dce112_clock_source_create(
+ ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO, &clk_src_regs[0], true);
+
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce112_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ /* get static clock information for PPLIB or firmware, save
+ * max_clock_state
+ */
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce110_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce112_timing_generator_create(
+ ctx,
+ i,
+ &dce112_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce112_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce112_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce112_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce112_opp_create(
+ ctx,
+ i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ /* Create hardware sequencer */
+ dce112_hw_sequencer_construct(dc);
+
+ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+ bw_calcs_data_update_from_pplib(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce112_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
new file mode 100644
index 000000000000..d5c19d34eb0a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -0,0 +1,61 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE112_H__
+#define __DC_RESOURCE_DCE112_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce112_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+enum dc_status dce112_validate_with_context(
+ struct dc *dc,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *context,
+ struct dc_state *old_context);
+
+enum dc_status dce112_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context);
+
+bool dce112_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
+enum dc_status dce112_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+
+
+#endif /* __DC_RESOURCE_DCE112_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
new file mode 100644
index 000000000000..1779b963525c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of the HW CRTC block.
+
+
+DCE120 = dce120_resource.o dce120_timing_generator.o \
+dce120_hw_sequencer.o
+
+AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE120)
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
new file mode 100644
index 000000000000..1a0b54d6034e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce120_hw_sequencer.h"
+#include "dce/dce_hwseq.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+#include "reg_helper.h"
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+struct dce120_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce120_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC0_CRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+#define CNTL_ID(controller_id)\
+ controller_id
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+#if 0
+static void dce120_init_pte(struct dc_context *ctx, uint8_t controller_id)
+{
+ uint32_t addr;
+ uint32_t value = 0;
+ uint32_t chunk_int = 0;
+ uint32_t chunk_mul = 0;
+/*
+ addr = mmDCP0_DVMM_PTE_CONTROL + controller_id *
+ (mmDCP1_DVMM_PTE_CONTROL- mmDCP0_DVMM_PTE_CONTROL);
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value, 0, DCP, controller_id,
+ DVMM_PTE_CONTROL,
+ DVMM_USE_SINGLE_PTE);
+
+ set_reg_field_value_soc15(
+ value, 1, DCP, controller_id,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE0);
+
+ set_reg_field_value_soc15(
+ value, 1, DCP, controller_id,
+ DVMM_PTE_CONTROL,
+ DVMM_PTE_BUFFER_MODE1);
+
+ dm_write_reg(ctx, addr, value);*/
+
+ addr = mmDVMM_PTE_REQ;
+ value = dm_read_reg(ctx, addr);
+
+ chunk_int = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ chunk_mul = get_reg_field_value(
+ value,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ if (chunk_int != 0x4 || chunk_mul != 0x4) {
+
+ set_reg_field_value(
+ value,
+ 255,
+ DVMM_PTE_REQ,
+ MAX_PTEREQ_TO_ISSUE);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_INT);
+
+ set_reg_field_value(
+ value,
+ 4,
+ DVMM_PTE_REQ,
+ HFLIP_PTEREQ_PER_CHUNK_MULTIPLIER);
+
+ dm_write_reg(ctx, addr, value);
+ }
+}
+#endif
+
+static bool dce120_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ /* disable for bringup */
+#if 0
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ return true;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+ if (power_gating != PIPE_GATING_CONTROL_INIT || controller_id == 0) {
+
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+ /* Revert MASTER_UPDATE_MODE to 0 because bios sets it 2
+ * by default when command table is called
+ */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmCRTC0_CRTC_MASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+ if (power_gating != PIPE_GATING_CONTROL_ENABLE)
+ dce120_init_pte(ctx, controller_id);
+
+ if (bp_result == BP_RESULT_OK)
+ return true;
+ else
+ return false;
+#endif
+ return false;
+}
+
+static void dce120_update_dchub(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data)
+{
+ /* TODO: port code from dal2 */
+ switch (dh_data->fb_mode) {
+ case FRAME_BUFFER_MODE_ZFB_ONLY:
+ /* For the ZFB case, program DCHUB FB BASE and TOP "upside down" to indicate ZFB mode */
+ REG_UPDATE_2(DCHUB_FB_LOCATION,
+ FB_TOP, 0,
+ FB_BASE, 0x0FFFF);
+
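+ /* AGP base/bot/top take addresses in 4 MB units, hence the >> 22 below */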
+ REG_UPDATE(DCHUB_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUB_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUB_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr + dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
+ /* Should not touch FB LOCATION (set by VBIOS in the AsicInit table) */
+ REG_UPDATE(DCHUB_AGP_BASE,
+ AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUB_AGP_BOT,
+ AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUB_AGP_TOP,
+ AGP_TOP, (dh_data->zfb_mc_base_addr + dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_LOCAL_ONLY:
+ /* Should not touch FB LOCATION (set by VBIOS in the AsicInit table) */
+ REG_UPDATE(DCHUB_AGP_BASE,
+ AGP_BASE, 0);
+
+ REG_UPDATE(DCHUB_AGP_BOT,
+ AGP_BOT, 0x03FFFF);
+
+ REG_UPDATE(DCHUB_AGP_TOP,
+ AGP_TOP, 0);
+ break;
+ default:
+ break;
+ }
+
+ dh_data->dchub_initialzied = true;
+ dh_data->dchub_info_valid = false;
+}
+
+void dce120_hw_sequencer_construct(struct dc *dc)
+{
+ /* Start from the DCE 11.0 hardware sequencer and override the
+ * DCE 12 specific hooks below.
+ */
+ dce110_hw_sequencer_construct(dc);
+ dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
+ dc->hwss.update_dchub = dce120_update_dchub;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
new file mode 100644
index 000000000000..77a6b86d7606
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.h
@@ -0,0 +1,36 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE120_H__
+#define __DC_HWSS_DCE120_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dce120_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE120_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
new file mode 100644
index 000000000000..5c48c22d9d98
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -0,0 +1,1004 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+
+#include "stream_encoder.h"
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dce120_resource.h"
+#include "dce112/dce112_resource.h"
+
+#include "dce110/dce110_resource.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce120_timing_generator.h"
+#include "irq/dce120/irq_service_dce120.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_mem_input.h"
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce120/dce120_hw_sequencer.h"
+#include "dce/dce_transform.h"
+
+#include "dce/dce_audio.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_hwseq.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+#include "vega10/NBIO/nbio_6_1_offset.h"
+#include "reg_helper.h"
+
+#include "dce100/dce100_resource.h"
+
+#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#endif
+
+enum dce120_clk_src_array_id {
+ DCE120_CLK_SRC_PLL0,
+ DCE120_CLK_SRC_PLL1,
+ DCE120_CLK_SRC_PLL2,
+ DCE120_CLK_SRC_PLL3,
+ DCE120_CLK_SRC_PLL4,
+ DCE120_CLK_SRC_PLL5,
+
+ DCE120_CLK_SRC_TOTAL
+};
+
+static const struct dce110_timing_generator_offsets dce120_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC0_CRTC_CONTROL),
+ }
+};
+
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header files */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE_INNER(seg) \
+ NBIF_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* macros to expand the register list macros defined in the HW object header files
+ * end *********************/
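+/* Illustrative expansion: SRI(DP_DPHY_INTERNAL_CTRL, DP, 0) becomes
+ *   .DP_DPHY_INTERNAL_CTRL = BASE(mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX)
+ *                            + mmDP0_DP_DPHY_INTERNAL_CTRL
+ * i.e. the SOC15 segment base address plus the per-instance register offset.
+ */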
+
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCE110_COMMON_REG_LIST()
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCE110(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_DCE110_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_DCE120_MASK_SH_LIST_SOC_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_DCE120_MASK_SH_LIST_SOC_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE110(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_SOC_BASE(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_SOC_BASE(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE120_REG_LIST(id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE120(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_120_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_120(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5)
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(index, id)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCE_112(id),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D),
+ clk_src_regs(4, E),
+ clk_src_regs(5, F)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_112(_MASK)
+};
+
+struct output_pixel_processor *dce120_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 7,
+ .num_stream_encoder = 6,
+ .num_pll = 6,
+};
+
+static const struct dc_debug debug_defaults = {
+ .disable_clock_gate = true,
+};
+
+struct clock_source *dce120_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(*clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce120_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+
+bool dce120_hw_sequencer_create(struct dc *dc)
+{
+ /* Construct the DCE 12 hardware sequencer, which is itself based on
+ * the DCE 11.0 sequencer.
+ */
+ dce120_hw_sequencer_construct(dc);
+
+ /* TODO: move to a separate file and override what is needed */
+
+ return true;
+}
+
+static struct timing_generator *dce120_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce120_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static void dce120_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce120_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL)
+ dce120_clock_source_destroy(
+ &pool->base.clock_sources[i]);
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce120_clock_source_destroy(&pool->base.dp_clock_source);
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL)
+ dal_irq_service_destroy(&pool->base.irqs);
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
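+ /* Unlike the DCE 11.2 variant, read the strap registers through the
+ * SOC15 helper and decode the fields with get_reg_field_value().
+ */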
+ uint32_t reg_val = dm_read_reg_soc15(ctx, mmCC_DC_MISC_STRAPS, 0);
+
+ straps->audio_stream_number = get_reg_field_value(reg_val,
+ CC_DC_MISC_STRAPS,
+ AUDIO_STREAM_NUMBER);
+ straps->hdmi_disable = get_reg_field_value(reg_val,
+ CC_DC_MISC_STRAPS,
+ HDMI_DISABLE);
+
+ reg_val = dm_read_reg_soc15(ctx, mmDC_PINSTRAPS, 0);
+ straps->dc_pinstraps_audio = get_reg_field_value(reg_val,
+ DC_PINSTRAPS,
+ DC_PINSTRAPS_AUDIO);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+static struct link_encoder *dce120_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+
+ return &enc110->base;
+}
+
+static struct input_pixel_processor *dce120_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static struct stream_encoder *dce120_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE120_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE12_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE12_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce120_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce120_stream_encoder_create,
+ .create_hwseq = dce120_hwseq_create,
+};
+
+#define mi_inst_regs(id) { MI_DCE12_REG_LIST(id) }
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE12_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE12_MASK_SH_LIST(_MASK)
+};
+
+static struct mem_input *dce120_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ return &dce_mi->base;
+}
+
+static struct transform *dce120_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->lb_memory_size = 0x1404; /*5124*/
+ return &transform->base;
+}
+
+static void dce120_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce120_res_pool_funcs = {
+ .destroy = dce120_destroy_resource_pool,
+ .link_enc_create = dce120_link_encoder_create,
+ .validate_guaranteed = dce112_validate_guaranteed,
+ .validate_bandwidth = dce112_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce112_add_stream_to_ctx
+};
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+ struct dm_pp_clock_levels_with_latency eng_clks = {0};
+ struct dm_pp_clock_levels_with_latency mem_clks = {0};
+ struct dm_pp_wm_sets_with_clock_ranges clk_ranges = {0};
+ int i;
+ unsigned int clk;
+ unsigned int latency;
+
+ /*do system clock*/
+ if (!dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK,
+ &eng_clks) || eng_clks.num_levels == 0) {
+
+ eng_clks.num_levels = 8;
+ clk = 300000;
+
+ for (i = 0; i < eng_clks.num_levels; i++) {
+ eng_clks.data[i].clocks_in_khz = clk;
+ clk += 100000;
+ }
+ }
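+ /* the fallback above yields 300, 400, ..., 1000 MHz (8 levels) */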
+
+ /* convert all clocks from kHz to fixed-point MHz TODO: wloop data */
+ dc->bw_vbios->high_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels-1].clocks_in_khz, 1000);
+ dc->bw_vbios->mid1_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid2_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*2/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid3_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid4_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*4/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid5_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*5/8].clocks_in_khz, 1000);
+ dc->bw_vbios->mid6_sclk = bw_frc_to_fixed(
+ eng_clks.data[eng_clks.num_levels*6/8].clocks_in_khz, 1000);
+ dc->bw_vbios->low_sclk = bw_frc_to_fixed(
+ eng_clks.data[0].clocks_in_khz, 1000);
+
+ /*do memory clock*/
+ if (!dm_pp_get_clock_levels_by_type_with_latency(
+ dc->ctx,
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ &mem_clks) || mem_clks.num_levels == 0) {
+
+ mem_clks.num_levels = 3;
+ clk = 250000;
+ latency = 45;
+
+ for (i = 0; i < mem_clks.num_levels; i++) {
+ mem_clks.data[i].clocks_in_khz = clk;
+ mem_clks.data[i].latency_in_us = latency;
+ clk += 500000;
+ latency -= 5;
+ }
+
+ }
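+ /* the fallback above yields 250/750/1250 MHz with 45/40/35 us latency */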
+
+ /* We don't need a separate PPLIB call for the validation clocks, since
+ * these tables already give us the highest sclk and highest mclk (UMA
+ * clock). Also always convert the UMA clock reported by PPLIB to YCLK
+ * using the HW formula:
+ * YCLK = UMACLK * m_memoryTypeMultiplier
+ */
+ dc->bw_vbios->low_yclk = bw_frc_to_fixed(
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+ dc->bw_vbios->high_yclk = bw_frc_to_fixed(
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ 1000);
+
+ /* Now notify PPLib/SMU about which Watermarks sets they should select
+ * depending on DPM state they are in. And update BW MGR GFX Engine and
+ * Memory clock member variables for Watermarks calculations for each
+ * Watermark Set
+ */
+ clk_ranges.num_wm_sets = 4;
+ clk_ranges.wm_clk_ranges[0].wm_set_id = WM_SET_A;
+ clk_ranges.wm_clk_ranges[0].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[0].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[0].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[1].wm_set_id = WM_SET_B;
+ clk_ranges.wm_clk_ranges[1].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[1].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[1].wm_min_memg_clk_in_khz =
+ mem_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[1].wm_max_mem_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz - 1;
+
+ clk_ranges.wm_clk_ranges[2].wm_set_id = WM_SET_C;
+ clk_ranges.wm_clk_ranges[2].wm_min_eng_clk_in_khz =
+ eng_clks.data[0].clocks_in_khz;
+ clk_ranges.wm_clk_ranges[2].wm_max_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz - 1;
+ clk_ranges.wm_clk_ranges[2].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[2].wm_max_mem_clk_in_khz = 5000000;
+
+ clk_ranges.wm_clk_ranges[3].wm_set_id = WM_SET_D;
+ clk_ranges.wm_clk_ranges[3].wm_min_eng_clk_in_khz =
+ eng_clks.data[eng_clks.num_levels*3/8].clocks_in_khz;
+ /* 5 GHz instead of data[7].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_eng_clk_in_khz = 5000000;
+ clk_ranges.wm_clk_ranges[3].wm_min_memg_clk_in_khz =
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz;
+ /* 5 GHz instead of data[2].clockInKHz to cover Overdrive */
+ clk_ranges.wm_clk_ranges[3].wm_max_mem_clk_in_khz = 5000000;
+
+ /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
+ dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges);
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct irq_service_init_data irq_init_data;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce120_res_pool_funcs;
+
+ /* TODO: Fill more data from GreenlandAsicCapability.cpp */
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 128;
+ dc->debug = debug_defaults;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL0] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL1] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL2] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL3] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL4] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
+ pool->base.clock_sources[DCE120_CLK_SRC_PLL5] =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ &clk_src_regs[5], false);
+ pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL;
+
+ pool->base.dp_clock_source =
+ dce120_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto clk_src_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce120_disp_clk_create(ctx);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto disp_clk_create_fail;
+ }
+
+ pool->base.dmcu = dce_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ irq_init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data);
+ if (!pool->base.irqs)
+ goto irqs_create_fail;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] =
+ dce120_timing_generator_create(
+ ctx,
+ i,
+ &dce120_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto controller_create_fail;
+ }
+
+ pool->base.mis[i] = dce120_mem_input_create(ctx, i);
+
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto controller_create_fail;
+ }
+
+ pool->base.ipps[i] = dce120_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto controller_create_fail;
+ }
+
+ pool->base.transforms[i] = dce120_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce120_opp_create(
+ ctx,
+ i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ }
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ if (!dce120_hw_sequencer_create(dc))
+ goto controller_create_fail;
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
+
+ bw_calcs_data_update_from_pplib(dc);
+
+ return true;
+
+irqs_create_fail:
+controller_create_fail:
+disp_clk_create_fail:
+clk_src_create_fail:
+res_create_fail:
+
+ destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dce120_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h
new file mode 100644
index 000000000000..3d1f3cf012f4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE120_H__
+#define __DC_RESOURCE_DCE120_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce120_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+#endif /* __DC_RESOURCE_DCE120_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
new file mode 100644
index 000000000000..2502182d5e82
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -0,0 +1,1174 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "dc_types.h"
+#include "dc_bios_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "dce120_timing_generator.h"
+
+#include "timing_generator.h"
+
+#define CRTC_REG_UPDATE_N(reg_name, n, ...) \
+ generic_reg_update_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__)
+
+#define CRTC_REG_SET_N(reg_name, n, ...) \
+ generic_reg_set_soc15(tg110->base.ctx, tg110->offsets.crtc, reg_name, n, __VA_ARGS__)
+
+#define CRTC_REG_UPDATE(reg, field, val) \
+ CRTC_REG_UPDATE_N(reg, 1, FD(reg##__##field), val)
+
+#define CRTC_REG_UPDATE_2(reg, field1, val1, field2, val2) \
+ CRTC_REG_UPDATE_N(reg, 2, FD(reg##__##field1), val1, FD(reg##__##field2), val2)
+
+#define CRTC_REG_UPDATE_3(reg, field1, val1, field2, val2, field3, val3) \
+ CRTC_REG_UPDATE_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
+
+#define CRTC_REG_UPDATE_4(reg, field1, val1, field2, val2, field3, val3, field4, val4) \
+	CRTC_REG_UPDATE_N(reg, 4, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3, FD(reg##__##field4), val4)
+
+#define CRTC_REG_UPDATE_5(reg, field1, val1, field2, val2, field3, val3, field4, val4, field5, val5) \
+	CRTC_REG_UPDATE_N(reg, 5, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3, FD(reg##__##field4), val4, FD(reg##__##field5), val5)
+
+#define CRTC_REG_SET(reg, field, val) \
+ CRTC_REG_SET_N(reg, 1, FD(reg##__##field), val)
+
+#define CRTC_REG_SET_2(reg, field1, val1, field2, val2) \
+ CRTC_REG_SET_N(reg, 2, FD(reg##__##field1), val1, FD(reg##__##field2), val2)
+
+#define CRTC_REG_SET_3(reg, field1, val1, field2, val2, field3, val3) \
+ CRTC_REG_SET_N(reg, 3, FD(reg##__##field1), val1, FD(reg##__##field2), val2, FD(reg##__##field3), val3)
+
+/**
+ *****************************************************************************
+ * Function: is_in_vertical_blank
+ *
+ * @brief
+ * check the current status of CRTC to check if we are in Vertical Blank
+ *   region
+ *
+ * @return
+ * true if currently in blank region, false otherwise
+ *
+ *****************************************************************************
+ */
+static bool dce120_timing_generator_is_in_vertical_blank(
+ struct timing_generator *tg)
+{
+ uint32_t field = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS,
+ tg110->offsets.crtc);
+
+ field = get_reg_field_value(value, CRTC0_CRTC_STATUS, CRTC_V_BLANK);
+ return field == 1;
+}
+
+
+/* determine if given timing can be supported by TG */
+bool dce120_timing_generator_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ enum signal_type signal)
+{
+ uint32_t interlace_factor = timing->flags.INTERLACE ? 2 : 1;
+ uint32_t v_blank =
+ (timing->v_total - timing->v_addressable -
+ timing->v_border_top - timing->v_border_bottom) *
+ interlace_factor;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ if (!dce110_timing_generator_validate_timing(
+ tg,
+ timing,
+ signal))
+ return false;
+
+
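+	/* The blank line count is scaled by the interlace factor before being
+	 * compared against the TG minimums below. */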
+ if (v_blank < tg110->min_v_blank ||
+ timing->h_sync_width < tg110->min_h_sync_width ||
+ timing->v_sync_width < tg110->min_v_sync_width)
+ return false;
+
+ return true;
+}
+
+bool dce120_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ return dce120_timing_generator_validate_timing(tg, timing, SIGNAL_TYPE_NONE);
+}
+
+/******** HW programming ************/
+/* Disable/Enable Timing Generator */
+bool dce120_timing_generator_enable_crtc(struct timing_generator *tg)
+{
+ enum bp_result result;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ /* Set MASTER_UPDATE_MODE to 0
+	 * This is needed for DRR, and is also suggested as the default value by Syed. */
+
+ CRTC_REG_UPDATE(CRTC0_CRTC_MASTER_UPDATE_MODE,
+ MASTER_UPDATE_MODE, 0);
+
+ CRTC_REG_UPDATE(CRTC0_CRTC_MASTER_UPDATE_LOCK,
+ UNDERFLOW_UPDATE_LOCK, 0);
+
+ /* TODO API for AtomFirmware didn't change*/
+ result = tg->bp->funcs->enable_crtc(tg->bp, tg110->controller_id, true);
+
+ return result == BP_RESULT_OK;
+}
+
+void dce120_timing_generator_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE(CRTC0_CRTC_CONTROL,
+ CRTC_HBLANK_EARLY_CONTROL, early_cntl);
+}
+
+/**************** TG current status ******************/
+
+/* return the current frame counter. Used by Linux kernel DRM */
+uint32_t dce120_timing_generator_get_vblank_counter(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS_FRAME_COUNT,
+ tg110->offsets.crtc);
+ uint32_t field = get_reg_field_value(
+ value, CRTC0_CRTC_STATUS_FRAME_COUNT, CRTC_FRAME_COUNT);
+
+ return field;
+}
+
+/* Get current H and V position */
+void dce120_timing_generator_get_crtc_position(
+ struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS_POSITION,
+ tg110->offsets.crtc);
+
+ position->horizontal_count = get_reg_field_value(value,
+ CRTC0_CRTC_STATUS_POSITION, CRTC_HORZ_COUNT);
+
+ position->vertical_count = get_reg_field_value(value,
+ CRTC0_CRTC_STATUS_POSITION, CRTC_VERT_COUNT);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_NOM_VERT_POSITION,
+ tg110->offsets.crtc);
+
+ position->nominal_vcount = get_reg_field_value(value,
+ CRTC0_CRTC_NOM_VERT_POSITION, CRTC_VERT_COUNT_NOM);
+}
+
+/* wait until TG is in beginning of vertical blank region */
+void dce120_timing_generator_wait_for_vblank(struct timing_generator *tg)
+{
+	/* We want to catch the beginning of VBlank here, so if the first
+	 * check already finds us in VBlank we might be very close to Active;
+	 * in that case wait for another frame
+ */
+ while (dce120_timing_generator_is_in_vertical_blank(tg)) {
+ if (!tg->funcs->is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+
+ while (!dce120_timing_generator_is_in_vertical_blank(tg)) {
+ if (!tg->funcs->is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/* wait until TG is in beginning of active region */
+void dce120_timing_generator_wait_for_vactive(struct timing_generator *tg)
+{
+ while (dce120_timing_generator_is_in_vertical_blank(tg)) {
+ if (!tg->funcs->is_counter_moving(tg)) {
+ /* error - no point to wait if counter is not moving */
+ break;
+ }
+ }
+}
+
+/*********** Timing Generator Synchronization routines ****/
+
+/* Sets up Global Swap Lock group, TimingServer or TimingClient */
+void dce120_timing_generator_setup_global_swap_lock(
+ struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value_crtc_vtotal =
+ dm_read_reg_soc15(tg->ctx,
+ mmCRTC0_CRTC_V_TOTAL,
+ tg110->offsets.crtc);
+ /* Checkpoint relative to end of frame */
+ uint32_t check_point =
+ get_reg_field_value(value_crtc_vtotal,
+ CRTC0_CRTC_V_TOTAL,
+ CRTC_V_TOTAL);
+
+
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_GSL_WINDOW, tg110->offsets.crtc, 0);
+
+ CRTC_REG_UPDATE_N(DCP0_DCP_GSL_CONTROL, 6,
+ /* This pipe will belong to GSL Group zero. */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL0_EN), 1,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_MASTER_EN), gsl_params->gsl_master == tg->inst,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY), HFLIP_READY_DELAY,
+ /* Keep signal low (pending high) during 6 lines.
+ * Also defines minimum interval before re-checking signal. */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY), HFLIP_CHECK_DELAY,
+ /* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING), 1);
+
+ CRTC_REG_SET_2(
+ CRTC0_CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM, check_point - FLIP_READY_BACK_LOOKUP,
+ CRTC_GSL_FORCE_DELAY, VFLIP_READY_DELAY);
+}
+
+/* Clear all the register writes done by setup_global_swap_lock */
+void dce120_timing_generator_tear_down_global_swap_lock(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+	/* Setting HW default values from reg specs */
+ CRTC_REG_SET_N(DCP0_DCP_GSL_CONTROL, 6,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL0_EN), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_MASTER_EN), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_FORCE_DELAY), HFLIP_READY_DELAY,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_HSYNC_FLIP_CHECK_DELAY), HFLIP_CHECK_DELAY,
+ /* DCP_GSL_PURPOSE_SURFACE_FLIP */
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_SYNC_SOURCE), 0,
+ FD(DCP0_DCP_GSL_CONTROL__DCP_GSL_DELAY_SURFACE_UPDATE_PENDING), 0);
+
+ CRTC_REG_SET_2(CRTC0_CRTC_GSL_CONTROL,
+ CRTC_GSL_CHECK_LINE_NUM, 0,
+ CRTC_GSL_FORCE_DELAY, 0x2); /*TODO Why this value here ?*/
+}
+
+/* Reset slave controllers on master VSync */
+void dce120_timing_generator_enable_reset_trigger(
+ struct timing_generator *tg,
+ int source)
+{
+ enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t rising_edge = 0;
+ uint32_t falling_edge = 0;
+ /* Setup trigger edge */
+ uint32_t pol_value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_V_SYNC_A_CNTL,
+ tg110->offsets.crtc);
+
+ /* Register spec has reversed definition:
+ * 0 for positive, 1 for negative */
+ if (get_reg_field_value(pol_value,
+ CRTC0_CRTC_V_SYNC_A_CNTL,
+ CRTC_V_SYNC_A_POL) == 0) {
+ rising_edge = 1;
+ } else {
+ falling_edge = 1;
+ }
+
+ /* TODO What about other sources ?*/
+ trig_src_select = TRIGGER_SOURCE_SELECT_GSL_GROUP0;
+
+ CRTC_REG_UPDATE_N(CRTC0_CRTC_TRIGB_CNTL, 7,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_SOURCE_SELECT), trig_src_select,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_POLARITY_SELECT), TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_RISING_EDGE_DETECT_CNTL), rising_edge,
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_FALLING_EDGE_DETECT_CNTL), falling_edge,
+ /* send every signal */
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_FREQUENCY_SELECT), 0,
+ /* no delay */
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_DELAY), 0,
+ /* clear trigger status */
+ FD(CRTC0_CRTC_TRIGB_CNTL__CRTC_TRIGB_CLEAR), 1);
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE, 2,
+ CRTC_FORCE_COUNT_NOW_TRIG_SEL, 1,
+ CRTC_FORCE_COUNT_NOW_CLEAR, 1);
+}
+
+/* disabling trigger-reset */
+void dce120_timing_generator_disable_reset_trigger(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE_2(
+ CRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_MODE, 0,
+ CRTC_FORCE_COUNT_NOW_CLEAR, 1);
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_TRIGB_CNTL,
+ CRTC_TRIGB_SOURCE_SELECT, TRIGGER_SOURCE_SELECT_LOGIC_ZERO,
+ CRTC_TRIGB_POLARITY_SELECT, TRIGGER_POLARITY_SELECT_LOGIC_ZERO,
+ /* clear trigger status */
+ CRTC_TRIGB_CLEAR, 1);
+
+}
+
+/* Checks whether CRTC triggered reset occurred */
+bool dce120_timing_generator_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ tg110->offsets.crtc);
+
+ return get_reg_field_value(value,
+ CRTC0_CRTC_FORCE_COUNT_NOW_CNTL,
+ CRTC_FORCE_COUNT_NOW_OCCURRED) != 0;
+}
+
+
+/******** Stuff to move to other virtual HW objects *****************/
+/* Move to enable accelerated mode */
+void dce120_timing_generator_disable_vga(struct timing_generator *tg)
+{
+ uint32_t offset = 0;
+ uint32_t value = 0;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ switch (tg110->controller_id) {
+ case CONTROLLER_ID_D0:
+ offset = 0;
+ break;
+ case CONTROLLER_ID_D1:
+ offset = mmD2VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D2:
+ offset = mmD3VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D3:
+ offset = mmD4VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D4:
+ offset = mmD5VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ case CONTROLLER_ID_D5:
+ offset = mmD6VGA_CONTROL - mmD1VGA_CONTROL;
+ break;
+ default:
+ break;
+ }
+
+ value = dm_read_reg_soc15(tg->ctx, mmD1VGA_CONTROL, offset);
+
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_MODE_ENABLE);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_TIMING_SELECT);
+ set_reg_field_value(
+ value, 0, D1VGA_CONTROL, D1VGA_SYNC_POLARITY_SELECT);
+ set_reg_field_value(value, 0, D1VGA_CONTROL, D1VGA_OVERSCAN_COLOR_EN);
+
+ dm_write_reg_soc15(tg->ctx, mmD1VGA_CONTROL, offset, value);
+}
+/* TODO: Should we move it to transform */
+/* Fully program CRTC timing in timing generator */
+void dce120_timing_generator_program_blanking(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t tmp1 = 0;
+ uint32_t tmp2 = 0;
+ uint32_t vsync_offset = timing->v_border_bottom +
+ timing->v_front_porch;
+ uint32_t v_sync_start = timing->v_addressable + vsync_offset;
+
+ uint32_t hsync_offset = timing->h_border_right +
+ timing->h_front_porch;
+ uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_H_TOTAL,
+ CRTC_H_TOTAL,
+ timing->h_total - 1);
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL,
+ CRTC_V_TOTAL,
+ timing->v_total - 1);
+
+	/* In case V_TOTAL_CONTROL is on, make sure V_TOTAL_MAX and
+ * V_TOTAL_MIN are equal to V_TOTAL.
+ */
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX,
+ timing->v_total - 1);
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN,
+ timing->v_total - 1);
+
+ tmp1 = timing->h_total -
+ (h_sync_start + timing->h_border_left);
+ tmp2 = tmp1 + timing->h_addressable +
+ timing->h_border_left + timing->h_border_right;
+
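+	/* Worked example (illustrative, standard 1080p timing, no borders):
+	 * h_total = 2200, h_addressable = 1920, h_front_porch = 88, so
+	 * h_sync_start = 2008, giving CRTC_H_BLANK_END = 2200 - 2008 = 192
+	 * and CRTC_H_BLANK_START = 192 + 1920 = 2112.
+	 */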
+ CRTC_REG_UPDATE_2(
+ CRTC0_CRTC_H_BLANK_START_END,
+ CRTC_H_BLANK_END, tmp1,
+ CRTC_H_BLANK_START, tmp2);
+
+ tmp1 = timing->v_total - (v_sync_start + timing->v_border_top);
+ tmp2 = tmp1 + timing->v_addressable + timing->v_border_top +
+ timing->v_border_bottom;
+
+ CRTC_REG_UPDATE_2(
+ CRTC0_CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END, tmp1,
+ CRTC_V_BLANK_START, tmp2);
+}
+
+/* TODO: Should we move it to opp? */
+/* Combine with below and move YUV/RGB color conversion to SW layer */
+void dce120_timing_generator_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB, black_color->color_b_cb,
+ CRTC_BLACK_COLOR_G_Y, black_color->color_g_y,
+ CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
+}
+/* Combine with above and move YUV/RGB color conversion to SW layer */
+void dce120_timing_generator_set_overscan_color_black(
+ struct timing_generator *tg,
+ const struct tg_color *color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+ CRTC_REG_SET_3(
+ CRTC0_CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE, color->color_b_cb,
+ CRTC_OVERSCAN_COLOR_GREEN, color->color_g_y,
+ CRTC_OVERSCAN_COLOR_RED, color->color_r_cr);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_OVERSCAN_COLOR,
+ tg110->offsets.crtc);
+
+ dm_write_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLACK_COLOR,
+ tg110->offsets.crtc,
+ value);
+
+	/* It is desirable to have a constant DAC output voltage during the
+ * blank time that is higher than the 0 volt reference level that the
+ * DAC outputs when the NBLANK signal
+ * is asserted low, such as for output to an analog TV. */
+ dm_write_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLANK_DATA_COLOR,
+ tg110->offsets.crtc,
+ value);
+
+	/* TODO: we have to program the EXT registers, and we need to know the
+	 * LB DATA format because it is used with more than 10 (i.e. 12) bits
+	 * per color:
+ *
+ * m_mmDxCRTC_OVERSCAN_COLOR_EXT
+ * m_mmDxCRTC_BLACK_COLOR_EXT
+ * m_mmDxCRTC_BLANK_DATA_COLOR_EXT
+ */
+}
+
+void dce120_timing_generator_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params)
+{
+
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN, params->vertical_total_min - 1);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX, params->vertical_total_max - 1);
+ CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 6,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 1,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 1,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK_EN), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK,
+ 0x180);
+
+ } else {
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MIN,
+ CRTC_V_TOTAL_MIN, 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_V_TOTAL_MAX,
+ CRTC_V_TOTAL_MAX, 0);
+ CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 5,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0,
+ FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0);
+ CRTC_REG_UPDATE(
+ CRTC0_CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK,
+ 0);
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: dce120_timing_generator_get_position
+ *
+ * @brief
+ * Returns CRTC vertical/horizontal counters
+ *
+ * @param [out] position
+ *****************************************************************************
+ */
+void dce120_timing_generator_get_position(struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_STATUS_POSITION,
+ tg110->offsets.crtc);
+
+ position->horizontal_count = get_reg_field_value(
+ value,
+ CRTC0_CRTC_STATUS_POSITION,
+ CRTC_HORZ_COUNT);
+
+ position->vertical_count = get_reg_field_value(
+ value,
+ CRTC0_CRTC_STATUS_POSITION,
+ CRTC_VERT_COUNT);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_NOM_VERT_POSITION,
+ tg110->offsets.crtc);
+
+ position->nominal_vcount = get_reg_field_value(
+ value,
+ CRTC0_CRTC_NOM_VERT_POSITION,
+ CRTC_VERT_COUNT_NOM);
+}
+
+
+void dce120_timing_generator_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ struct crtc_position position;
+
+ uint32_t v_blank_start_end = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_V_BLANK_START_END,
+ tg110->offsets.crtc);
+
+ *v_blank_start = get_reg_field_value(v_blank_start_end,
+ CRTC0_CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_START);
+ *v_blank_end = get_reg_field_value(v_blank_start_end,
+ CRTC0_CRTC_V_BLANK_START_END,
+ CRTC_V_BLANK_END);
+
+ dce120_timing_generator_get_crtc_position(
+ tg, &position);
+
+ *h_position = position.horizontal_count;
+ *v_position = position.vertical_count;
+}
+
+void dce120_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t v_sync_width_and_b_porch =
+ timing->v_total - timing->v_addressable -
+ timing->v_border_bottom - timing->v_front_porch;
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_START_LINE_CONTROL,
+ tg110->offsets.crtc);
+
+ set_reg_field_value(
+ value,
+ enable ? 0 : 1,
+ CRTC0_CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+
+	/* Program the advanced line position according to the best case from a
+	 * data-fetching perspective, to hide MC latency and to prefill the
+	 * Line Buffer in V Blank (up to 10 lines, as the LB can store at most 10 lines)
+ */
+ if (v_sync_width_and_b_porch > 10)
+ v_sync_width_and_b_porch = 10;
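+	/* e.g. (illustrative) for a standard 1080p timing:
+	 * 1125 - 1080 - 0 - 4 = 41 lines, clamped to 10 to match the Line
+	 * Buffer depth. */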
+
+ set_reg_field_value(
+ value,
+ v_sync_width_and_b_porch,
+ CRTC0_CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+
+ dm_write_reg_soc15(tg->ctx,
+ mmCRTC0_CRTC_START_LINE_CONTROL,
+ tg110->offsets.crtc,
+ value);
+}
+
+void dce120_tg_program_blank_color(struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = 0;
+
+ CRTC_REG_UPDATE_3(
+ CRTC0_CRTC_BLACK_COLOR,
+ CRTC_BLACK_COLOR_B_CB, black_color->color_b_cb,
+ CRTC_BLACK_COLOR_G_Y, black_color->color_g_y,
+ CRTC_BLACK_COLOR_R_CR, black_color->color_r_cr);
+
+ value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLACK_COLOR,
+ tg110->offsets.crtc);
+ dm_write_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLANK_DATA_COLOR,
+ tg110->offsets.crtc,
+ value);
+}
+
+void dce120_tg_set_overscan_color(struct timing_generator *tg,
+ const struct tg_color *overscan_color)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_SET_3(
+ CRTC0_CRTC_OVERSCAN_COLOR,
+ CRTC_OVERSCAN_COLOR_BLUE, overscan_color->color_b_cb,
+ CRTC_OVERSCAN_COLOR_GREEN, overscan_color->color_g_y,
+ CRTC_OVERSCAN_COLOR_RED, overscan_color->color_r_cr);
+}
+
+void dce120_tg_program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (use_vbios)
+ dce110_timing_generator_program_timing_generator(tg, timing);
+ else
+ dce120_timing_generator_program_blanking(tg, timing);
+}
+
+bool dce120_tg_is_blanked(struct timing_generator *tg)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t value = dm_read_reg_soc15(
+ tg->ctx,
+ mmCRTC0_CRTC_BLANK_CONTROL,
+ tg110->offsets.crtc);
+
+ if (get_reg_field_value(
+ value,
+ CRTC0_CRTC_BLANK_CONTROL,
+ CRTC_BLANK_DATA_EN) == 1 &&
+ get_reg_field_value(
+ value,
+ CRTC0_CRTC_BLANK_CONTROL,
+ CRTC_CURRENT_BLANK_STATE) == 1)
+ return true;
+
+ return false;
+}
+
+void dce120_tg_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_SET(
+ CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+
+ if (enable_blanking)
+ CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
+ else
+ dm_write_reg_soc15(tg->ctx, mmCRTC0_CRTC_BLANK_CONTROL,
+ tg110->offsets.crtc, 0);
+}
+
+bool dce120_tg_validate_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+
+void dce120_tg_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ dce120_timing_generator_wait_for_vblank(tg);
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ dce120_timing_generator_wait_for_vactive(tg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void dce120_tg_set_colors(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color)
+{
+ if (blank_color != NULL)
+ dce120_tg_program_blank_color(tg, blank_color);
+
+ if (overscan_color != NULL)
+ dce120_tg_set_overscan_color(tg, overscan_color);
+}
+
+static void dce120_timing_generator_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_STATIC_SCREEN_CONTROL,
+ CRTC_STATIC_SCREEN_EVENT_MASK, value,
+ CRTC_STATIC_SCREEN_FRAME_COUNT, 2);
+}
+
+void dce120_timing_generator_set_test_pattern(
+ struct timing_generator *tg,
+	/* TODO: replace 'controller_dp_test_pattern' with 'test_pattern_mode',
+	 * because this is not DP-specific (the DP-specific part probably
+	 * belongs in the DP encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth)
+{
+ struct dc_context *ctx = tg->ctx;
+ uint32_t value;
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+	/* color ramp generator mixes 16-bit color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+
+ CRTC_REG_UPDATE_2(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_VRES, 6,
+ CRTC_TEST_PATTERN_HRES, 6);
+
+ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN, 1,
+ CRTC_TEST_PATTERN_MODE, mode,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE, dyn_range,
+ CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+ /* CRTC_TEST_PATTERN_DATA has 16 bits,
+ * lowest 6 are hardwired to ZERO
+			 * color bits should be left aligned to MSB
+			 * XXXXXXXXXX000000 for 10 bit,
+			 * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 bit
+ */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
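+		/* Worked example (illustrative), white component at 8 bpc:
+		 * 0xFFFF >> (16 - 8) = 0xFF, then 0xFF << (16 - 8) = 0xFF00. */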
+
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS, tg110->offsets.crtc, 0);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+		 * We make a loop of 6 iterations in which we prepare the
+		 * mask, write it, then prepare the color for the next write.
+		 * The first iteration writes the mask only, but on each
+		 * subsequent iteration the color prepared in the previous
+		 * iteration is written together with the new mask. The last
+		 * component is written separately: the mask does not change
+		 * between the 6th and 7th writes, and its color was prepared
+		 * by the last loop iteration
+ */
+
+		/* write colors; the color-value mask order in
+		 * CRTC_TEST_PATTERN_MASK is B1, G1, R1, B0, G0, R0
+ */
+ value = 0;
+ for (index = 0; index < 6; index++) {
+ /* prepare color mask, first write PATTERN_DATA
+ * will have all zeros
+ */
+ set_reg_field_value(
+ value,
+ (1 << index),
+ CRTC0_CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_MASK);
+ /* write color component */
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value);
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ set_reg_field_value(
+ value,
+ dst_color[index],
+ CRTC0_CRTC_TEST_PATTERN_COLOR,
+ CRTC_TEST_PATTERN_DATA);
+ }
+ /* write last color component,
+ * it's been already prepared in the loop
+ */
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value);
+
+ /* enable test pattern */
+ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN, 1,
+ CRTC_TEST_PATTERN_MODE, mode,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+			/* Increment of the first ramp for one color gradation:
+			 * one gradation of 6-bit color corresponds to 2^10
+			 * gradations in 16-bit color
+			 */
+ inc_base = (src_bpc - dst_bpc);
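+			/* e.g. for 6 bpc: inc_base = 16 - 6 = 10, i.e. one
+			 * 6-bit gradation spans 2^10 16-bit codes. */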
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0, inc_base,
+ CRTC_TEST_PATTERN_INC1, 0,
+ CRTC_TEST_PATTERN_HRES, 6,
+ CRTC_TEST_PATTERN_VRES, 6,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0, inc_base,
+ CRTC_TEST_PATTERN_INC1, 0,
+ CRTC_TEST_PATTERN_HRES, 8,
+ CRTC_TEST_PATTERN_VRES, 6,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ CRTC_REG_UPDATE_5(CRTC0_CRTC_TEST_PATTERN_PARAMETERS,
+ CRTC_TEST_PATTERN_INC0, inc_base,
+ CRTC_TEST_PATTERN_INC1, inc_base + 2,
+ CRTC_TEST_PATTERN_HRES, 8,
+ CRTC_TEST_PATTERN_VRES, 5,
+ CRTC_TEST_PATTERN_RAMP0_OFFSET, 384 << 6);
+ }
+ break;
+ default:
+ break;
+ }
+
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, 0);
+
+ /* enable test pattern */
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_CONTROL, tg110->offsets.crtc, 0);
+
+ CRTC_REG_UPDATE_4(CRTC0_CRTC_TEST_PATTERN_CONTROL,
+ CRTC_TEST_PATTERN_EN, 1,
+ CRTC_TEST_PATTERN_MODE, mode,
+ CRTC_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ CRTC_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ value = 0;
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_CONTROL, tg110->offsets.crtc, value);
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_COLOR, tg110->offsets.crtc, value);
+ dm_write_reg_soc15(ctx, mmCRTC0_CRTC_TEST_PATTERN_PARAMETERS, tg110->offsets.crtc, value);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static bool dce120_arm_vert_intr(
+ struct timing_generator *tg,
+ uint8_t width)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t v_blank_start, v_blank_end, h_position, v_position;
+
+ tg->funcs->get_scanoutpos(
+ tg,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ if (v_blank_start == 0 || v_blank_end == 0)
+ return false;
+
+ CRTC_REG_SET_2(
+ CRTC0_CRTC_VERTICAL_INTERRUPT0_POSITION,
+ CRTC_VERTICAL_INTERRUPT0_LINE_START, v_blank_start,
+ CRTC_VERTICAL_INTERRUPT0_LINE_END, v_blank_start + width);
+
+ return true;
+}
+
+static const struct timing_generator_funcs dce120_tg_funcs = {
+ .validate_timing = dce120_tg_validate_timing,
+ .program_timing = dce120_tg_program_timing,
+ .enable_crtc = dce120_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+	/* never called */
+ .get_position = dce120_timing_generator_get_crtc_position,
+ .get_frame_count = dce120_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce120_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce120_timing_generator_set_early_control,
+	/* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = dce120_tg_wait_for_state,
+ .set_blank = dce120_tg_set_blank,
+ .is_blanked = dce120_tg_is_blanked,
+	/* never called */
+ .set_colors = dce120_tg_set_colors,
+ .set_overscan_blank_color = dce120_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce120_timing_generator_program_blank_color,
+ .disable_vga = dce120_timing_generator_disable_vga,
+ .did_triggered_reset_occur = dce120_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock = dce120_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce120_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce120_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock = dce120_timing_generator_tear_down_global_swap_lock,
+ .enable_advanced_request = dce120_timing_generator_enable_advanced_request,
+ .set_drr = dce120_timing_generator_set_drr,
+ .set_static_screen_control = dce120_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce120_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce120_arm_vert_intr,
+};
+
+
+void dce120_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+
+ tg110->offsets = *offsets;
+
+ tg110->base.funcs = &dce120_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC0_CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC0_CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+	/* CRTC requires a minimum HBLANK = 32 pixels and a
+	 * minimum HSYNC = 8 pixels */
+ tg110->min_h_blank = 32;
+ /*DCE12_CRTC_Block_ARch.doc*/
+ tg110->min_h_front_porch = 0;
+ tg110->min_h_back_porch = 0;
+
+ tg110->min_h_sync_width = 8;
+ tg110->min_v_sync_width = 1;
+ tg110->min_v_blank = 3;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h
new file mode 100644
index 000000000000..549d70b23e82
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE120_H__
+#define __DC_TIMING_GENERATOR_DCE120_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+#include "dce110/dce110_timing_generator.h"
+
+
+void dce120_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+#endif /* __DC_TIMING_GENERATOR_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
new file mode 100644
index 000000000000..c1105895e5fa
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the 'controller' sub-component of DAL.
+# It provides the control and status of HW CRTC block.
+
+DCE80 = dce80_timing_generator.o dce80_compressor.o dce80_hw_sequencer.o \
+ dce80_resource.o
+
+AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCE80)
+
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
new file mode 100644
index 000000000000..951f2caba9b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.c
@@ -0,0 +1,834 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#include "gmc/gmc_7_1_d.h"
+
+#include "include/logger_interface.h"
+#include "dce80_compressor.h"
+
+#define DCP_REG(reg)\
+ (reg + cp80->offsets.dcp_offset)
+#define DMIF_REG(reg)\
+ (reg + cp80->offsets.dmif_offset)
+
+static const struct dce80_compressor_reg_offsets reg_offsets[] = {
+{
+ .dcp_offset = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG0_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG1_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG2_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG3_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG4_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+},
+{
+ .dcp_offset = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+ .dmif_offset = (mmDMIF_PG5_DPG_PIPE_DPM_CONTROL
+ - mmDMIF_PG0_DPG_PIPE_DPM_CONTROL),
+}
+};
+
+static const uint32_t dce8_one_lpt_channel_max_resolution = 2048 * 1200;
+
+enum fbc_idle_force {
+ /* Bit 0 - Display registers updated */
+ FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
+
+ /* Bit 2 - FBC_GRPH_COMP_EN register updated */
+ FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
+ /* Bit 3 - FBC_SRC_SEL register updated */
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
+ /* Bit 4 - FBC_MIN_COMPRESSION register updated */
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
+ /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
+ FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
+ /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
+ /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
+
+ /* Bit 24 - Memory write to region 0 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
+ /* Bit 25 - Memory write to region 1 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
+ /* Bit 26 - Memory write to region 2 defined by MC registers */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
+ /* Bit 27 - Memory write to region 3 defined by MC registers. */
+ FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
+
+ /* Bit 28 - Memory write from any client other than MCIF */
+ FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
+ /* Bit 29 - CG statics screen signal is inactive */
+ FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
+};
+
+static uint32_t lpt_size_alignment(struct dce80_compressor *cp80)
+{
+	/* LPT_ALIGNMENT (in bytes) = ROW_SIZE * #BANKS * #DRAM_CHANNELS. */
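+	/* e.g. (illustrative values) a 2048-byte row, 8 banks and 2 DRAM
+	 * channels give an alignment of 2048 * 8 * 2 = 32768 bytes. */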
+ return cp80->base.raw_size * cp80->base.banks_num *
+ cp80->base.dram_channels_num;
+}
+
+static uint32_t lpt_memory_control_config(struct dce80_compressor *cp80,
+ uint32_t lpt_control)
+{
+ /*LPT MC Config */
+ if (cp80->base.options.bits.LPT_MC_CONFIG == 1) {
+ /* POSSIBLE VALUES for LPT NUM_PIPES (DRAM CHANNELS):
+ * 00 - 1 CHANNEL
+ * 01 - 2 CHANNELS
+ * 02 - 4 OR 6 CHANNELS
+ * (Only for discrete GPU, N/A for CZ)
+ * 03 - 8 OR 12 CHANNELS
+ * (Only for discrete GPU, N/A for CZ) */
+ switch (cp80->base.dram_channels_num) {
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ case 1:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_PIPES);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_PIPES!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LPT NUM_BANKS is in
+ * GRPH_CONTROL.GRPH_NUM_BANKS register field
+ * Specifies the number of memory banks for tiling
+ * purposes. Only applies to 2D and 3D tiling modes.
+ * POSSIBLE VALUES:
+ * 00 - DCP_GRPH_NUM_BANKS_2BANK: ADDR_SURF_2_BANK
+ * 01 - DCP_GRPH_NUM_BANKS_4BANK: ADDR_SURF_4_BANK
+ * 02 - DCP_GRPH_NUM_BANKS_8BANK: ADDR_SURF_8_BANK
+ * 03 - DCP_GRPH_NUM_BANKS_16BANK: ADDR_SURF_16_BANK */
+ switch (cp80->base.banks_num) {
+ case 16:
+ set_reg_field_value(
+ lpt_control,
+ 3,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 8:
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 4:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ case 2:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_NUM_BANKS);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT NUM_BANKS!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping is in DMIF_ADDR_CALC.
+ * ADDR_CONFIG_PIPE_INTERLEAVE_SIZE register field for
+ * Carrizo specifies the memory interleave per pipe.
+ * It effectively specifies the location of pipe bits in
+ * the memory address.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_PIPE_INTERLEAVE_256B: 256 byte
+ * interleave
+ * 01 - ADDR_CONFIG_PIPE_INTERLEAVE_512B: 512 byte
+ * interleave
+ */
+ switch (cp80->base.channel_interleave_size) {
+ case 256: /*256B */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ case 512: /*512B */
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_PIPE_INTERLEAVE_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT INTERLEAVE_SIZE!!!",
+ __func__);
+ break;
+ }
+
+ /* The mapping for LOW_POWER_TILING_ROW_SIZE is in
+ * DMIF_ADDR_CALC.ADDR_CONFIG_ROW_SIZE register field
+ * for Carrizo. Specifies the size of dram row in bytes.
+ * This should match up with NOOFCOLS field in
+ * MC_ARB_RAMCFG (ROW_SIZE = 4 * 2 ^^ columns).
+ * This register DMIF_ADDR_CALC is not used by the
+ * hardware as it is only used for addrlib assertions.
+ * POSSIBLE VALUES:
+ * 00 - ADDR_CONFIG_1KB_ROW: Treat 1KB as DRAM row
+ * boundary
+ * 01 - ADDR_CONFIG_2KB_ROW: Treat 2KB as DRAM row
+ * boundary
+ * 02 - ADDR_CONFIG_4KB_ROW: Treat 4KB as DRAM row
+ * boundary */
+ switch (cp80->base.raw_size) {
+ case 4096: /*4 KB */
+ set_reg_field_value(
+ lpt_control,
+ 2,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 2048:
+ set_reg_field_value(
+ lpt_control,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ case 1024:
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROW_SIZE);
+ break;
+ default:
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: Invalid LPT ROW_SIZE!!!",
+ __func__);
+ break;
+ }
+ } else {
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: LPT MC Configuration is not provided",
+ __func__);
+ }
+
+ return lpt_control;
+}
+
+static bool is_source_bigger_than_epanel_size(
+ struct dce80_compressor *cp80,
+ uint32_t source_view_width,
+ uint32_t source_view_height)
+{
+ if (cp80->base.embedded_panel_h_size != 0 &&
+ cp80->base.embedded_panel_v_size != 0 &&
+ ((source_view_width * source_view_height) >
+ (cp80->base.embedded_panel_h_size *
+ cp80->base.embedded_panel_v_size)))
+ return true;
+
+ return false;
+}
+
+static uint32_t align_to_chunks_number_per_line(
+ struct dce80_compressor *cp80,
+ uint32_t pixels)
+{
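+	/* Round pixels up to the next multiple of 256, e.g. 1920 -> 2048. */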
+ return 256 * ((pixels + 255) / 256);
+}
+
+static void wait_for_fbc_state_changed(
+ struct dce80_compressor *cp80,
+ bool enabled)
+{
+ uint8_t counter = 0;
+ uint32_t addr = mmFBC_STATUS;
+ uint32_t value;
+
+ while (counter < 10) {
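+	/* Poll the FBC enable status up to 10 times with a 10 us delay, i.e.
+	 * wait at most roughly 100 us for HW to reach the requested state. */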
+ value = dm_read_reg(cp80->base.ctx, addr);
+ if (get_reg_field_value(
+ value,
+ FBC_STATUS,
+ FBC_ENABLE_STATUS) == enabled)
+ break;
+ udelay(10);
+ counter++;
+ }
+
+ if (counter == 10) {
+ dm_logger_write(
+ cp80->base.ctx->logger, LOG_WARNING,
+ "%s: wait counter exceeded, changes to HW not applied",
+ __func__);
+ }
+}
+
+void dce80_compressor_power_up_fbc(struct compressor *compressor)
+{
+ uint32_t value;
+ uint32_t addr;
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_EN);
+ set_reg_field_value(value, 2, FBC_CNTL, FBC_COHERENCY_MODE);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_MODE;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_RLE_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_DPCM4_RGB_EN);
+ set_reg_field_value(value, 1, FBC_COMP_MODE, FBC_IND_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ addr = mmFBC_COMP_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_COMP_CNTL, FBC_DEPTH_RGB08_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ /*FBC_MIN_COMPRESSION 0 ==> 2:1 */
+ /* 1 ==> 4:1 */
+ /* 2 ==> 8:1 */
+ /* 0xF ==> 1:1 */
+ set_reg_field_value(value, 0xF, FBC_COMP_CNTL, FBC_MIN_COMPRESSION);
+ dm_write_reg(compressor->ctx, addr, value);
+ compressor->min_compress_ratio = FBC_COMPRESS_RATIO_1TO1;
+
+ value = 0;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT0, value);
+
+ value = 0xFFFFFF;
+ dm_write_reg(compressor->ctx, mmFBC_IND_LUT1, value);
+}
+
+void dce80_compressor_enable_fbc(
+ struct compressor *compressor,
+ uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ (compressor->options.bits.DUMMY_BACKEND == 0) &&
+ (!dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) &&
+ (!is_source_bigger_than_epanel_size(
+ cp80,
+ params->source_view_width,
+ params->source_view_height))) {
+
+ uint32_t addr;
+ uint32_t value;
+
+		/* Before enabling FBC we first need to enable LPT if
+		 * applicable; LPT state should always be changed
+		 * (enable/disable) while FBC is disabled */
+ if (compressor->options.bits.LPT_SUPPORT && (paths_num < 2) &&
+ (params->source_view_width *
+ params->source_view_height <=
+ dce8_one_lpt_channel_max_resolution)) {
+ dce80_compressor_enable_lpt(compressor);
+ }
+
+ addr = mmFBC_CNTL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ set_reg_field_value(
+ value,
+ params->inst,
+ FBC_CNTL, FBC_SRC_SEL);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Keep track of enum controller_id FBC is attached to */
+ compressor->is_enabled = true;
+ compressor->attached_inst = params->inst;
+ cp80->offsets = reg_offsets[params->inst];
+
+		/* Toggle it, as there is a bug in HW. */
+ set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+ set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ wait_for_fbc_state_changed(cp80, true);
+ }
+}
+
+void dce80_compressor_disable_fbc(struct compressor *compressor)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+
+ if (compressor->options.bits.FBC_SUPPORT &&
+ dce80_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ uint32_t reg_data;
+ /* Turn off compression */
+ reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
+ dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data);
+
+ /* Reset enum controller_id to undefined */
+ compressor->attached_inst = 0;
+ compressor->is_enabled = false;
+
+		/* Whenever disabling FBC, make sure LPT is also disabled if
+		 * LPT is supported. */
+ if (compressor->options.bits.LPT_SUPPORT)
+ dce80_compressor_disable_lpt(compressor);
+
+ wait_for_fbc_state_changed(cp80, false);
+ }
+}
+
+bool dce80_compressor_is_fbc_enabled_in_hw(
+ struct compressor *compressor,
+ uint32_t *inst)
+{
+ /* Check the hardware register */
+ uint32_t value;
+
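+	/* Report FBC as enabled if it is either already active (FBC_STATUS)
+	 * or has been requested but not yet applied (FBC_CNTL). */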
+ value = dm_read_reg(compressor->ctx, mmFBC_STATUS);
+ if (get_reg_field_value(value, FBC_STATUS, FBC_ENABLE_STATUS)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ value = dm_read_reg(compressor->ctx, mmFBC_CNTL);
+ if (get_reg_field_value(value, FBC_CNTL, FBC_GRPH_COMP_EN)) {
+ if (inst != NULL)
+ *inst = compressor->attached_inst;
+ return true;
+ }
+
+ return false;
+}
+
+bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *compressor)
+{
+ /* Check the hardware register */
+ uint32_t value = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ return get_reg_field_value(
+ value,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+}
+
+void dce80_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t value = 0;
+ uint32_t fbc_pitch = 0;
+ uint32_t compressed_surf_address_low_part =
+ compressor->compr_surface_address.addr.low_part;
+
+ /* Clear content first. */
+ dm_write_reg(
+ compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ 0);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS), 0);
+
+ if (compressor->options.bits.LPT_SUPPORT) {
+ uint32_t lpt_alignment = lpt_size_alignment(cp80);
+
+ if (lpt_alignment != 0) {
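+			/* Round the compressed surface address up to the
+			 * next lpt_alignment boundary. */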
+ compressed_surf_address_low_part =
+ ((compressed_surf_address_low_part
+ + (lpt_alignment - 1)) / lpt_alignment)
+ * lpt_alignment;
+ }
+ }
+
+ /* Write address, HIGH has to be first. */
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS_HIGH),
+ compressor->compr_surface_address.addr.high_part);
+ dm_write_reg(compressor->ctx,
+ DCP_REG(mmGRPH_COMPRESS_SURFACE_ADDRESS),
+ compressed_surf_address_low_part);
+
+ fbc_pitch = align_to_chunks_number_per_line(
+ cp80,
+ params->source_view_width);
+
+ if (compressor->min_compress_ratio == FBC_COMPRESS_RATIO_1TO1)
+ fbc_pitch = fbc_pitch / 8;
+ else
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Unexpected DCE8 compression ratio",
+ __func__);
+
+ /* Clear content first. */
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), 0);
+
+ /* Write FBC Pitch. */
+ set_reg_field_value(
+ value,
+ fbc_pitch,
+ GRPH_COMPRESS_PITCH,
+ GRPH_COMPRESS_PITCH);
+ dm_write_reg(compressor->ctx, DCP_REG(mmGRPH_COMPRESS_PITCH), value);
+
+}
+
+void dce80_compressor_disable_lpt(struct compressor *compressor)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t inx;
+
+ /* Disable all pipes LPT Stutter */
+ for (inx = 0; inx < 3; inx++) {
+ value =
+ dm_read_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 0,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(
+ compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH),
+ value);
+ }
+
+ /* Disable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Clear selection of Channel(s) containing Compressed Surface */
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 0xFFFFFFFF,
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, mmGMCON_LPT_TARGET, value);
+}
+
+void dce80_compressor_enable_lpt(struct compressor *compressor)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t value;
+ uint32_t addr;
+ uint32_t value_control;
+ uint32_t channels;
+
+ /* Enable LPT Stutter from Display pipe */
+ value = dm_read_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH));
+ set_reg_field_value(
+ value,
+ 1,
+ DPG_PIPE_STUTTER_CONTROL_NONLPTCH,
+ STUTTER_ENABLE_NONLPTCH);
+ dm_write_reg(compressor->ctx,
+ DMIF_REG(mmDPG_PIPE_STUTTER_CONTROL_NONLPTCH), value);
+
+	/* Selection of Channel(s) containing Compressed Surface: 0xffffffff
+	 * will disable LPT.
+	 * STCTRL_LPT_TARGETn corresponds to channel n. */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value_control = dm_read_reg(compressor->ctx, addr);
+ channels = get_reg_field_value(value_control,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+
+ addr = mmGMCON_LPT_TARGET;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+		channels + 1, /* not mentioned in the programming guide,
+				but follows DCE8.1 */
+ GMCON_LPT_TARGET,
+ STCTRL_LPT_TARGET);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Enable LPT */
+ addr = mmLOW_POWER_TILING_CONTROL;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ 1,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ENABLE);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce80_compressor_program_lpt_control(
+ struct compressor *compressor,
+ struct compr_addr_and_pitch_params *params)
+{
+ struct dce80_compressor *cp80 = TO_DCE80_COMPRESSOR(compressor);
+ uint32_t rows_per_channel;
+ uint32_t lpt_alignment;
+ uint32_t source_view_width;
+ uint32_t source_view_height;
+ uint32_t lpt_control = 0;
+
+ if (!compressor->options.bits.LPT_SUPPORT)
+ return;
+
+ lpt_control = dm_read_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL);
+
+ /* POSSIBLE VALUES for Low Power Tiling Mode:
+ * 00 - Use channel 0
+ * 01 - Use Channel 0 and 1
+ * 02 - Use Channel 0,1,2,3
+ * 03 - reserved */
+ switch (compressor->lpt_channels_num) {
+	/* case 2:
+	 * Use Channel 0 & 1 / Not used for DCE 8 */
+	case 1:
+		/* Use Channel 0 for LPT for DCE 8 */
+ set_reg_field_value(
+ lpt_control,
+ 0,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_MODE);
+ break;
+ default:
+ dm_logger_write(
+ compressor->ctx->logger, LOG_WARNING,
+ "%s: Invalid selected DRAM channels for LPT!!!",
+ __func__);
+ break;
+ }
+
+ lpt_control = lpt_memory_control_config(cp80, lpt_control);
+
+ /* Program LOW_POWER_TILING_ROWS_PER_CHAN field which depends on
+ * FBC compressed surface pitch.
+ * LOW_POWER_TILING_ROWS_PER_CHAN = Roundup ((Surface Height *
+ * Surface Pitch) / (Row Size * Number of Channels *
+ * Number of Banks)). */
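+	/* Illustrative example (assumed numbers): with a chunk-aligned width
+	 * of 2048 pixels, 1080 lines and 4 bytes per pixel the surface is
+	 * 2048 * 1080 * 4 = 8,847,360 bytes; if lpt_size_alignment() were to
+	 * return 8192, this works out to exactly 1080 rows per channel. */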
+ rows_per_channel = 0;
+ lpt_alignment = lpt_size_alignment(cp80);
+ source_view_width =
+ align_to_chunks_number_per_line(
+ cp80,
+ params->source_view_width);
+ source_view_height = (params->source_view_height + 1) & (~0x1);
+
+ if (lpt_alignment != 0) {
+ rows_per_channel = source_view_width * source_view_height * 4;
+ rows_per_channel =
+ (rows_per_channel % lpt_alignment) ?
+ (rows_per_channel / lpt_alignment + 1) :
+ rows_per_channel / lpt_alignment;
+ }
+
+ set_reg_field_value(
+ lpt_control,
+ rows_per_channel,
+ LOW_POWER_TILING_CONTROL,
+ LOW_POWER_TILING_ROWS_PER_CHAN);
+
+ dm_write_reg(compressor->ctx,
+ mmLOW_POWER_TILING_CONTROL, lpt_control);
+}
+
+/*
+ * DCE 8 Frame Buffer Compression Implementation
+ */
+
+void dce80_compressor_set_fbc_invalidation_triggers(
+ struct compressor *compressor,
+ uint32_t fbc_trigger)
+{
+	/* Disable region hit event, FBC_MEMORY_REGION_MASK = 0 (bits 16-19).
+	 * For DCE 8, regions cannot be used - they do not work with S/G.
+	 */
+ uint32_t addr = mmFBC_CLIENT_REGION_MASK;
+ uint32_t value = dm_read_reg(compressor->ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ FBC_CLIENT_REGION_MASK,
+ FBC_MEMORY_REGION_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+
+ /* Setup events when to clear all CSM entries (effectively marking
+ * current compressed data invalid)
+	 * For DCE 8, CSM metadata 11111 means "Not Compressed".
+ * Used as the initial value of the metadata sent to the compressor
+ * after invalidation, to indicate that the compressor should attempt
+ * to compress all chunks on the current pass. Also used when the chunk
+ * is not successfully written to memory.
+ * When this CSM value is detected, FBC reads from the uncompressed
+ * buffer. Set events according to passed in value, these events are
+ * valid for DCE8:
+ * - bit 0 - display register updated
+ * - bit 28 - memory write from any client except from MCIF
+ * - bit 29 - CG static screen signal is inactive
+	 * In addition, DCE8.1 needs to set new DCE8.1-specific events that
+	 * trigger invalidation on certain register changes; for example,
+	 * enabling Alpha Compression may trigger invalidation of FBC once the
+	 * bit is set. These events are as follows:
+ * - Bit 2 - FBC_GRPH_COMP_EN register updated
+ * - Bit 3 - FBC_SRC_SEL register updated
+ * - Bit 4 - FBC_MIN_COMPRESSION register updated
+ * - Bit 5 - FBC_ALPHA_COMP_EN register updated
+ * - Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated
+ * - Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated
+ */
+ addr = mmFBC_IDLE_FORCE_CLEAR_MASK;
+ value = dm_read_reg(compressor->ctx, addr);
+ set_reg_field_value(
+ value,
+ fbc_trigger |
+ FBC_IDLE_FORCE_GRPH_COMP_EN |
+ FBC_IDLE_FORCE_SRC_SEL_CHANGE |
+ FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
+ FBC_IDLE_FORCE_ALPHA_COMP_EN |
+ FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
+ FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ FBC_IDLE_FORCE_CLEAR_MASK,
+ FBC_IDLE_FORCE_CLEAR_MASK);
+ dm_write_reg(compressor->ctx, addr, value);
+}
+
+void dce80_compressor_construct(struct dce80_compressor *compressor,
+ struct dc_context *ctx)
+{
+ struct dc_bios *bp = ctx->dc_bios;
+ struct embedded_panel_info panel_info;
+
+	compressor->base.ctx = ctx;
+	compressor->base.embedded_panel_h_size = 0;
+	compressor->base.embedded_panel_v_size = 0;
+	compressor->base.memory_bus_width = ctx->asic_id.vram_width;
+	compressor->base.allocated_size = 0;
+	compressor->base.preferred_requested_size = 0;
+	compressor->base.min_compress_ratio = FBC_COMPRESS_RATIO_INVALID;
+	compressor->base.banks_num = 0;
+	compressor->base.raw_size = 0;
+	compressor->base.channel_interleave_size = 0;
+	compressor->base.dram_channels_num = 0;
+	compressor->base.attached_inst = 0;
+	compressor->base.is_enabled = false;
+
+	compressor->base.options.raw = 0;
+	compressor->base.options.bits.FBC_SUPPORT = true;
+	compressor->base.options.bits.LPT_SUPPORT = true;
+	/* For DCE 8 always use one DRAM channel for LPT */
+	compressor->base.lpt_channels_num = 1;
+	compressor->base.options.bits.DUMMY_BACKEND = false;
+	compressor->base.options.bits.CLK_GATING_DISABLED = false;
+
+	/* Check if this system has more than 1 DRAM channel; if only 1 then
+	 * LPT should not be supported. memory_bus_width must be initialized
+	 * above before this check. */
+	if (compressor->base.memory_bus_width == 64)
+		compressor->base.options.bits.LPT_SUPPORT = false;
+
+ if (BP_RESULT_OK ==
+ bp->funcs->get_embedded_panel_info(bp, &panel_info)) {
+ compressor->base.embedded_panel_h_size =
+ panel_info.lcd_timing.horizontal_addressable;
+ compressor->base.embedded_panel_v_size =
+ panel_info.lcd_timing.vertical_addressable;
+ }
+}
+
+struct compressor *dce80_compressor_create(struct dc_context *ctx)
+{
+ struct dce80_compressor *cp80 =
+ kzalloc(sizeof(struct dce80_compressor), GFP_KERNEL);
+
+ if (!cp80)
+ return NULL;
+
+ dce80_compressor_construct(cp80, ctx);
+ return &cp80->base;
+}
+
+void dce80_compressor_destroy(struct compressor **compressor)
+{
+ kfree(TO_DCE80_COMPRESSOR(*compressor));
+ *compressor = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
new file mode 100644
index 000000000000..cca58b044402
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_compressor.h
@@ -0,0 +1,78 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMPRESSOR_DCE80_H__
+#define __DC_COMPRESSOR_DCE80_H__
+
+#include "../inc/compressor.h"
+
+#define TO_DCE80_COMPRESSOR(compressor)\
+ container_of(compressor, struct dce80_compressor, base)
+
+struct dce80_compressor_reg_offsets {
+ uint32_t dcp_offset;
+ uint32_t dmif_offset;
+};
+
+struct dce80_compressor {
+ struct compressor base;
+ struct dce80_compressor_reg_offsets offsets;
+};
+
+struct compressor *dce80_compressor_create(struct dc_context *ctx);
+
+void dce80_compressor_construct(struct dce80_compressor *cp80,
+ struct dc_context *ctx);
+
+void dce80_compressor_destroy(struct compressor **cp);
+
+/* FBC RELATED */
+void dce80_compressor_power_up_fbc(struct compressor *cp);
+
+void dce80_compressor_enable_fbc(struct compressor *cp, uint32_t paths_num,
+ struct compr_addr_and_pitch_params *params);
+
+void dce80_compressor_disable_fbc(struct compressor *cp);
+
+void dce80_compressor_set_fbc_invalidation_triggers(struct compressor *cp,
+ uint32_t fbc_trigger);
+
+void dce80_compressor_program_compressed_surface_address_and_pitch(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce80_compressor_is_fbc_enabled_in_hw(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+
+/* LPT RELATED */
+void dce80_compressor_enable_lpt(struct compressor *cp);
+
+void dce80_compressor_disable_lpt(struct compressor *cp);
+
+void dce80_compressor_program_lpt_control(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+
+bool dce80_compressor_is_lpt_enabled_in_hw(struct compressor *cp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
new file mode 100644
index 000000000000..ccfcf1c0eeb3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dce80_hw_sequencer.h"
+
+#include "dce/dce_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce100/dce100_hw_sequencer.h"
+
+/* include DCE8 register header files */
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+struct dce80_hw_seq_reg_offsets {
+ uint32_t crtc;
+};
+
+static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
+}
+};
+
+#define HW_REG_CRTC(reg, id)\
+ (reg + reg_offsets[id].crtc)
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+/***************************PIPE_CONTROL***********************************/
+
+static bool dce80_enable_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ enum bp_result bp_result = BP_RESULT_OK;
+ enum bp_pipe_control_action cntl;
+ struct dc_context *ctx = dc->ctx;
+
+ if (power_gating == PIPE_GATING_CONTROL_INIT)
+ cntl = ASIC_PIPE_INIT;
+ else if (power_gating == PIPE_GATING_CONTROL_ENABLE)
+ cntl = ASIC_PIPE_ENABLE;
+ else
+ cntl = ASIC_PIPE_DISABLE;
+
+	if (!(power_gating == PIPE_GATING_CONTROL_INIT && controller_id != 0)) {
+
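+		/* controller_id is 0-based here; the BIOS command table
+		 * appears to take a 1-based controller id, hence the + 1. */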
+ bp_result = dcb->funcs->enable_disp_power_gating(
+ dcb, controller_id + 1, cntl);
+
+		/* Revert MASTER_UPDATE_MODE to 0 because the BIOS sets it to 2
+		 * by default when the command table is called.
+		 */
+ dm_write_reg(ctx,
+ HW_REG_CRTC(mmMASTER_UPDATE_MODE, controller_id),
+ 0);
+ }
+
+	return bp_result == BP_RESULT_OK;
+}
+
+void dce80_hw_sequencer_construct(struct dc *dc)
+{
+ dce110_hw_sequencer_construct(dc);
+
+ dc->hwss.enable_display_power_gating = dce80_enable_display_power_gating;
+ dc->hwss.pipe_control_lock = dce_pipe_control_lock;
+ dc->hwss.set_bandwidth = dce100_set_bandwidth;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
new file mode 100644
index 000000000000..7a1b31def66f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.h
@@ -0,0 +1,36 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCE80_H__
+#define __DC_HWSS_DCE80_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dce80_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_HWSS_DCE80_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
new file mode 100644
index 000000000000..9c18efd3446f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -0,0 +1,1257 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "dm_services.h"
+
+#include "link_encoder.h"
+#include "stream_encoder.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "irq/dce80/irq_service_dce80.h"
+#include "dce110/dce110_timing_generator.h"
+#include "dce110/dce110_resource.h"
+#include "dce80/dce80_timing_generator.h"
+#include "dce/dce_mem_input.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_mem_input.h"
+#include "dce/dce_ipp.h"
+#include "dce/dce_transform.h"
+#include "dce/dce_opp.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "dce80/dce80_hw_sequencer.h"
+#include "dce100/dce100_resource.h"
+
+#include "reg_helper.h"
+
+/* TODO remove this include */
+
+#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+#endif
+
+#ifndef mmDP_DPHY_INTERNAL_CTRL
+#define mmDP_DPHY_INTERNAL_CTRL 0x1CDE
+#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x1CDE
+#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x1FDE
+#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x42DE
+#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x45DE
+#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x48DE
+#define mmDP5_DP_DPHY_INTERNAL_CTRL 0x4BDE
+#define mmDP6_DP_DPHY_INTERNAL_CTRL 0x4EDE
+#endif
+
+
+#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_2 0x05CB
+ #define mmBIOS_SCRATCH_6 0x05CF
+#endif
+
+#ifndef mmDP_DPHY_FAST_TRAINING
+ #define mmDP_DPHY_FAST_TRAINING 0x1CCE
+ #define mmDP0_DP_DPHY_FAST_TRAINING 0x1CCE
+ #define mmDP1_DP_DPHY_FAST_TRAINING 0x1FCE
+ #define mmDP2_DP_DPHY_FAST_TRAINING 0x42CE
+ #define mmDP3_DP_DPHY_FAST_TRAINING 0x45CE
+ #define mmDP4_DP_DPHY_FAST_TRAINING 0x48CE
+ #define mmDP5_DP_DPHY_FAST_TRAINING 0x4BCE
+ #define mmDP6_DP_DPHY_FAST_TRAINING 0x4ECE
+#endif
+
+
+#ifndef mmHPD_DC_HPD_CONTROL
+ #define mmHPD_DC_HPD_CONTROL 0x189A
+ #define mmHPD0_DC_HPD_CONTROL 0x189A
+ #define mmHPD1_DC_HPD_CONTROL 0x18A2
+ #define mmHPD2_DC_HPD_CONTROL 0x18AA
+ #define mmHPD3_DC_HPD_CONTROL 0x18B2
+ #define mmHPD4_DC_HPD_CONTROL 0x18BA
+ #define mmHPD5_DC_HPD_CONTROL 0x18C2
+#endif
+
+#define DCE11_DIG_FE_CNTL 0x4a00
+#define DCE11_DIG_BE_CNTL 0x4a47
+#define DCE11_DP_SEC 0x4ac3
+
+static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
+ {
+ .crtc = (mmCRTC0_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmGRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG0_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC1_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG1_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC2_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG2_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC3_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG3_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC4_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG4_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ },
+ {
+ .crtc = (mmCRTC5_CRTC_CONTROL - mmCRTC_CONTROL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmGRPH_CONTROL),
+ .dmif = (mmDMIF_PG5_DPG_WATERMARK_MASK_CONTROL
+ - mmDPG_WATERMARK_MASK_CONTROL),
+ }
+};
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+
+static const struct dce_disp_clk_registers disp_clk_regs = {
+ CLK_COMMON_REG_LIST_DCE_BASE()
+};
+
+static const struct dce_disp_clk_shift disp_clk_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_disp_clk_mask disp_clk_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_COMMON_REG_LIST_DCE_BASE(id)\
+}
+
+static const struct dce_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+ ipp_regs(4),
+ ipp_regs(5)
+};
+
+static const struct dce_ipp_shift ipp_shift = {
+ IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce_ipp_mask ipp_mask = {
+ IPP_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+#define transform_regs(id)\
+[id] = {\
+ XFM_COMMON_REG_LIST_DCE80(id)\
+}
+
+static const struct dce_transform_registers xfm_regs[] = {
+ transform_regs(0),
+ transform_regs(1),
+ transform_regs(2),
+ transform_regs(3),
+ transform_regs(4),
+ transform_regs(5)
+};
+
+static const struct dce_transform_shift xfm_shift = {
+ XFM_COMMON_MASK_SH_LIST_DCE80(__SHIFT)
+};
+
+static const struct dce_transform_mask xfm_mask = {
+ XFM_COMMON_MASK_SH_LIST_DCE80(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCE80_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_COMMON_REG_LIST_DCE_BASE(id),\
+ .AFMT_CNTL = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+ stream_enc_regs(4),
+ stream_enc_regs(5),
+ stream_enc_regs(6)
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_DCE_80_REG_LIST(id),\
+}
+
+static const struct dce_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+ opp_regs(4),
+ opp_regs(5)
+};
+
+static const struct dce_opp_shift opp_shift = {
+ OPP_COMMON_MASK_SH_LIST_DCE_80(__SHIFT)
+};
+
+static const struct dce_opp_mask opp_mask = {
+ OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+ audio_regs(4),
+ audio_regs(5),
+ audio_regs(6),
+};
+
+static const struct dce_audio_shift audio_shift = {
+ AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define clk_src_regs(id)\
+[id] = {\
+ CS_COMMON_REG_LIST_DCE_80(id),\
+}
+
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0),
+ clk_src_regs(1),
+ clk_src_regs(2)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
+};
+
+static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
+};
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 6,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 3,
+};
+
+static const struct resource_caps res_cap_81 = {
+ .num_timing_generator = 4,
+ .num_audio = 7,
+ .num_stream_encoder = 7,
+ .num_pll = 3,
+};
+
+static const struct resource_caps res_cap_83 = {
+ .num_timing_generator = 2,
+ .num_audio = 6,
+ .num_stream_encoder = 6,
+ .num_pll = 2,
+};
+
+#define CTX ctx
+#define REG(reg) mm ## reg
+
+#ifndef mmCC_DC_HDMI_STRAPS
+#define mmCC_DC_HDMI_STRAPS 0x1918
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE_MASK 0x40
+#define CC_DC_HDMI_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x700
+#define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#endif
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ REG_GET_2(CC_DC_HDMI_STRAPS,
+ HDMI_DISABLE, &straps->hdmi_disable,
+ AUDIO_STREAM_NUMBER, &straps->audio_stream_number);
+
+ REG_GET(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO, &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct timing_generator *dce80_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ struct dce110_timing_generator *tg110 =
+ kzalloc(sizeof(struct dce110_timing_generator), GFP_KERNEL);
+
+ if (!tg110)
+ return NULL;
+
+ dce80_timing_generator_construct(tg110, ctx, instance, offsets);
+ return &tg110->base;
+}
+
+static struct output_pixel_processor *dce80_opp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce110_opp *opp =
+ kzalloc(sizeof(struct dce110_opp), GFP_KERNEL);
+
+ if (!opp)
+ return NULL;
+
+ dce110_opp_construct(opp,
+ ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static struct stream_encoder *dce80_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = mm ## block ## id ## _ ## reg_name
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCE8_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCE8_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCE8_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dce80_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dce80_stream_encoder_create,
+ .create_hwseq = dce80_hwseq_create,
+};
+
+#define mi_inst_regs(id) { \
+ MI_DCE8_REG_LIST(id), \
+ .MC_HUB_RDREQ_DMIF_LIMIT = mmMC_HUB_RDREQ_DMIF_LIMIT \
+}
+static const struct dce_mem_input_registers mi_regs[] = {
+ mi_inst_regs(0),
+ mi_inst_regs(1),
+ mi_inst_regs(2),
+ mi_inst_regs(3),
+ mi_inst_regs(4),
+ mi_inst_regs(5),
+};
+
+static const struct dce_mem_input_shift mi_shifts = {
+ MI_DCE8_MASK_SH_LIST(__SHIFT),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE__SHIFT
+};
+
+static const struct dce_mem_input_mask mi_masks = {
+ MI_DCE8_MASK_SH_LIST(_MASK),
+ .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK
+};
+
+static struct mem_input *dce80_mem_input_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_mem_input *dce_mi = kzalloc(sizeof(struct dce_mem_input),
+ GFP_KERNEL);
+
+ if (!dce_mi) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks);
+ dce_mi->wa.single_head_rdreq_dmif_limit = 2;
+ return &dce_mi->base;
+}
+
+static void dce80_transform_destroy(struct transform **xfm)
+{
+ kfree(TO_DCE_TRANSFORM(*xfm));
+ *xfm = NULL;
+}
+
+static struct transform *dce80_transform_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_transform *transform =
+ kzalloc(sizeof(struct dce_transform), GFP_KERNEL);
+
+ if (!transform)
+ return NULL;
+
+ dce_transform_construct(transform, ctx, inst,
+ &xfm_regs[inst], &xfm_shift, &xfm_mask);
+ transform->prescaler_on = false;
+ return &transform->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 297000,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dce80_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+ return &enc110->base;
+}
+
+struct clock_source *dce80_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+void dce80_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static struct input_pixel_processor *dce80_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dce_ipp *ipp = kzalloc(sizeof(struct dce_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dce_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+static void destruct(struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ dce110_opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.transforms[i] != NULL)
+ dce80_transform_destroy(&pool->base.transforms[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ dce_ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.mis[i] != NULL) {
+ kfree(TO_DCE_MEM_INPUT(pool->base.mis[i]));
+ pool->base.mis[i] = NULL;
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL)
+ kfree(DCE110STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dce80_clock_source_destroy(&pool->base.clock_sources[i]);
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL)
+ dce80_clock_source_destroy(&pool->base.dp_clock_source);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i] != NULL) {
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+ }
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ dce110_resource_build_pipe_hw_param(pipe_ctx);
+
+ resource_build_info_frame(pipe_ctx);
+
+ return DC_OK;
+}
+
+bool dce80_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+	/* TODO: implement when needed; for now hardcode max values */
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+
+ return true;
+}
+
+static bool dce80_validate_surface_sets(
+ struct dc_state *context)
+{
+ int i;
+
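+	/* DCE8 has no underlay pipe: allow at most one plane per stream and
+	 * reject video (underlay) surface formats. */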
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->stream_status[i].plane_count == 0)
+ continue;
+
+ if (context->stream_status[i].plane_count > 1)
+ return false;
+
+ if (context->stream_status[i].plane_states[0]->format
+ >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return false;
+ }
+
+ return true;
+}
+
+enum dc_status dce80_validate_global(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (!dce80_validate_surface_sets(context))
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+enum dc_status dce80_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+
+ if (result == DC_OK)
+ result = dce80_validate_bandwidth(dc, context);
+
+ return result;
+}
+
+static void dce80_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
+
+ destruct(dce110_pool);
+ kfree(dce110_pool);
+ *pool = NULL;
+}
+
+static const struct resource_funcs dce80_res_pool_funcs = {
+ .destroy = dce80_destroy_resource_pool,
+ .link_enc_create = dce80_link_encoder_create,
+ .validate_guaranteed = dce80_validate_guaranteed,
+ .validate_bandwidth = dce80_validate_bandwidth,
+ .validate_plane = dce100_validate_plane,
+ .add_stream_to_ctx = dce100_add_stream_to_ctx,
+ .validate_global = dce80_validate_global
+};
+
+static bool dce80_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dce80_res_pool_funcs;
+
+
+ /*************************************************
+	 *  Resource + asic cap hardcoding			*
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
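+	/* If the BIOS reports an external clock source for DP, all three PLLs
+	 * remain available as pixel clock sources; otherwise PLL0 is dedicated
+	 * to DP. */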
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce80_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce80_timing_generator_create(
+ ctx, i, &dce80_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce80_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce80_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce80_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce80_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce80_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce80_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce80_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static bool dce81_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_81;
+ pool->base.funcs = &dce80_res_pool_funcs;
+
+
+ /*************************************************
+	 *  Resource + asic cap hardcoding			*
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap_81.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[2] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 3;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false);
+ pool->base.clk_src_count = 2;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce80_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce80_timing_generator_create(
+ ctx, i, &dce80_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce80_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce80_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce80_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce80_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce80_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce81_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce81_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static bool dce83_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dce110_resource_pool *pool)
+{
+ unsigned int i;
+ struct dc_context *ctx = dc->ctx;
+ struct dc_firmware_info info;
+ struct dc_bios *bp;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_83;
+ pool->base.funcs = &dce80_res_pool_funcs;
+
+
+ /*************************************************
+	 *  Resource + asic cap hardcoding			*
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+ pool->base.pipe_count = res_cap_83.num_timing_generator;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 40;
+ dc->caps.max_cursor_size = 128;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ bp = ctx->dc_bios;
+
+ if ((bp->funcs->get_firmware_info(bp, &info) == BP_RESULT_OK) &&
+ info.external_clock_source_frequency_for_dp != 0) {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false);
+ pool->base.clock_sources[1] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 2;
+
+ } else {
+ pool->base.dp_clock_source =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true);
+
+ pool->base.clock_sources[0] =
+ dce80_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false);
+ pool->base.clk_src_count = 1;
+ }
+
+ if (pool->base.dp_clock_source == NULL) {
+ dm_error("DC: failed to create dp clock source!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+ }
+
+ pool->base.display_clock = dce_disp_clk_create(ctx,
+ &disp_clk_regs,
+ &disp_clk_shift,
+ &disp_clk_mask);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ pool->base.display_clock->max_clks_state =
+ static_clk_info.max_clocks_state;
+
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dce80_create(&init_data);
+ if (!pool->base.irqs)
+ goto res_create_fail;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.timing_generators[i] = dce80_timing_generator_create(
+ ctx, i, &dce80_tg_offsets[i]);
+ if (pool->base.timing_generators[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.mis[i] = dce80_mem_input_create(ctx, i);
+ if (pool->base.mis[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create memory input!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.ipps[i] = dce80_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create input pixel processor!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.transforms[i] = dce80_transform_create(ctx, i);
+ if (pool->base.transforms[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create transform!\n");
+ goto res_create_fail;
+ }
+
+ pool->base.opps[i] = dce80_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create output pixel processor!\n");
+ goto res_create_fail;
+ }
+ }
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ &res_create_funcs))
+ goto res_create_fail;
+
+ /* Create hardware sequencer */
+ dce80_hw_sequencer_construct(dc);
+
+ return true;
+
+res_create_fail:
+ destruct(pool);
+ return false;
+}
+
+struct resource_pool *dce83_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dce110_resource_pool *pool =
+ kzalloc(sizeof(struct dce110_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (dce83_construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
new file mode 100644
index 000000000000..eff31ab83a39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
@@ -0,0 +1,47 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCE80_H__
+#define __DC_RESOURCE_DCE80_H__
+
+#include "core_types.h"
+
+struct dc;
+struct resource_pool;
+
+struct resource_pool *dce80_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+struct resource_pool *dce81_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+struct resource_pool *dce83_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+#endif /* __DC_RESOURCE_DCE80_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
new file mode 100644
index 000000000000..265894851493
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/* include DCE8 register header files */
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "dc_types.h"
+
+#include "include/grph_object_id.h"
+#include "include/logger_interface.h"
+#include "../dce110/dce110_timing_generator.h"
+#include "dce80_timing_generator.h"
+
+#include "timing_generator.h"
+
+enum black_color_format {
+ BLACK_COLOR_FORMAT_RGB_FULLRANGE = 0, /* used as index in array */
+ BLACK_COLOR_FORMAT_RGB_LIMITED,
+ BLACK_COLOR_FORMAT_YUV_TV,
+ BLACK_COLOR_FORMAT_YUV_CV,
+ BLACK_COLOR_FORMAT_YUV_SUPER_AA,
+
+ BLACK_COLOR_FORMAT_COUNT
+};
+
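+/*
+ * Per-instance CRTC/DCP register offsets, expressed relative to the
+ * instance 0 registers; dce80_timing_generator_construct() stores the entry
+ * for its instance in tg110->derived_offsets.
+ */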
+static const struct dce110_timing_generator_offsets reg_offsets[] = {
+{
+ .crtc = (mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP0_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC1_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP1_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC2_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP2_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC3_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP3_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC4_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP4_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+},
+{
+ .crtc = (mmCRTC5_DCFE_MEM_LIGHT_SLEEP_CNTL - mmCRTC0_DCFE_MEM_LIGHT_SLEEP_CNTL),
+ .dcp = (mmDCP5_GRPH_CONTROL - mmDCP0_GRPH_CONTROL),
+}
+};
+
+#define NUMBER_OF_FRAME_TO_WAIT_ON_TRIGGERED_RESET 10
+
+#define MAX_H_TOTAL (CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1)
+#define MAX_V_TOTAL (CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1)
+
+#define CRTC_REG(reg) (reg + tg110->offsets.crtc)
+#define DCP_REG(reg) (reg + tg110->offsets.dcp)
+#define DMIF_REG(reg) (reg + tg110->offsets.dmif)
+
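+/*
+ * Program the pipe arbitration pixel duration. With pix_clk_khz in kHz,
+ * 1000000000 / pix_clk_khz is the pixel period expressed in picoseconds
+ * (10^9 / f_kHz == 10^12 / f_Hz).
+ */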
+void program_pix_dur(struct timing_generator *tg, uint32_t pix_clk_khz)
+{
+ uint64_t pix_dur;
+ uint32_t addr = mmDMIF_PG0_DPG_PIPE_ARBITRATION_CONTROL1
+ + DCE110TG_FROM_TG(tg)->offsets.dmif;
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (pix_clk_khz == 0)
+ return;
+
+ pix_dur = 1000000000 / pix_clk_khz;
+
+ set_reg_field_value(
+ value,
+ pix_dur,
+ DPG_PIPE_ARBITRATION_CONTROL1,
+ PIXEL_DURATION);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
+
+static void program_timing(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios)
+{
+ if (!use_vbios)
+ program_pix_dur(tg, timing->pix_clk_khz);
+
+ dce110_tg_program_timing(tg, timing, use_vbios);
+}
+
+static const struct timing_generator_funcs dce80_tg_funcs = {
+ .validate_timing = dce110_tg_validate_timing,
+ .program_timing = program_timing,
+ .enable_crtc = dce110_timing_generator_enable_crtc,
+ .disable_crtc = dce110_timing_generator_disable_crtc,
+ .is_counter_moving = dce110_timing_generator_is_counter_moving,
+ .get_position = dce110_timing_generator_get_position,
+ .get_frame_count = dce110_timing_generator_get_vblank_counter,
+ .get_scanoutpos = dce110_timing_generator_get_crtc_scanoutpos,
+ .set_early_control = dce110_timing_generator_set_early_control,
+ .wait_for_state = dce110_tg_wait_for_state,
+ .set_blank = dce110_tg_set_blank,
+ .is_blanked = dce110_tg_is_blanked,
+ .set_colors = dce110_tg_set_colors,
+ .set_overscan_blank_color =
+ dce110_timing_generator_set_overscan_color_black,
+ .set_blank_color = dce110_timing_generator_program_blank_color,
+ .disable_vga = dce110_timing_generator_disable_vga,
+ .did_triggered_reset_occur =
+ dce110_timing_generator_did_triggered_reset_occur,
+ .setup_global_swap_lock =
+ dce110_timing_generator_setup_global_swap_lock,
+ .enable_reset_trigger = dce110_timing_generator_enable_reset_trigger,
+ .disable_reset_trigger = dce110_timing_generator_disable_reset_trigger,
+ .tear_down_global_swap_lock =
+ dce110_timing_generator_tear_down_global_swap_lock,
+ .set_drr = dce110_timing_generator_set_drr,
+ .set_static_screen_control =
+ dce110_timing_generator_set_static_screen_control,
+ .set_test_pattern = dce110_timing_generator_set_test_pattern,
+ .arm_vert_intr = dce110_arm_vert_intr,
+
+ /* DCE8.0 overrides */
+ .enable_advanced_request =
+ dce80_timing_generator_enable_advanced_request,
+};
+
+void dce80_timing_generator_construct(
+ struct dce110_timing_generator *tg110,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets)
+{
+ tg110->controller_id = CONTROLLER_ID_D0 + instance;
+ tg110->base.inst = instance;
+ tg110->offsets = *offsets;
+ tg110->derived_offsets = reg_offsets[instance];
+
+ tg110->base.funcs = &dce80_tg_funcs;
+
+ tg110->base.ctx = ctx;
+ tg110->base.bp = ctx->dc_bios;
+
+ tg110->max_h_total = CRTC_H_TOTAL__CRTC_H_TOTAL_MASK + 1;
+ tg110->max_v_total = CRTC_V_TOTAL__CRTC_V_TOTAL_MASK + 1;
+
+ tg110->min_h_blank = 56;
+ tg110->min_h_front_porch = 4;
+ tg110->min_h_back_porch = 4;
+}
+
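+/*
+ * Toggle between the legacy requestor and the advanced (prefetch) request
+ * mode, and pick the advanced start line position: timings with a very
+ * short v_sync_width + v_front_porch (<= 3 lines) get start line 3 with
+ * prefetch disabled, everything else gets start line 4 with prefetch
+ * enabled.
+ */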
+void dce80_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing)
+{
+ struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
+ uint32_t addr = CRTC_REG(mmCRTC_START_LINE_CONTROL);
+ uint32_t value = dm_read_reg(tg->ctx, addr);
+
+ if (enable) {
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_LEGACY_REQUESTOR_EN);
+ }
+
+ if ((timing->v_sync_width + timing->v_front_porch) <= 3) {
+ set_reg_field_value(
+ value,
+ 3,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 0,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ } else {
+ set_reg_field_value(
+ value,
+ 4,
+ CRTC_START_LINE_CONTROL,
+ CRTC_ADVANCED_START_LINE_POSITION);
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PREFETCH_EN);
+ }
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_PROGRESSIVE_START_LINE_EARLY);
+
+ set_reg_field_value(
+ value,
+ 1,
+ CRTC_START_LINE_CONTROL,
+ CRTC_INTERLACE_START_LINE_EARLY);
+
+ dm_write_reg(tg->ctx, addr, value);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
new file mode 100644
index 000000000000..9cebb24c94c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCE80_H__
+#define __DC_TIMING_GENERATOR_DCE80_H__
+
+#include "timing_generator.h"
+#include "../include/grph_object_id.h"
+
+/* DCE8.0 implementation inherits from DCE11.0 */
+void dce80_timing_generator_construct(
+ struct dce110_timing_generator *tg,
+ struct dc_context *ctx,
+ uint32_t instance,
+ const struct dce110_timing_generator_offsets *offsets);
+
+/******** HW programming ************/
+void dce80_timing_generator_enable_advanced_request(
+ struct timing_generator *tg,
+ bool enable,
+ const struct dc_crtc_timing *timing);
+
+#endif /* __DC_TIMING_GENERATOR_DCE80_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
new file mode 100644
index 000000000000..ebeb88283a14
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for DCN.
+
+DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
+ dcn10_dpp.o dcn10_opp.o dcn10_timing_generator.o \
+ dcn10_hubp.o dcn10_mpc.o \
+ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o
+
+AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
new file mode 100644
index 000000000000..7f579cb19f4b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+
+#include "dcn10_cm_common.h"
+
+#define REG(reg) reg
+
+#define CTX \
+ ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ reg->shifts.field_name, reg->masks.field_name
+
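+/*
+ * Walk the CSC coefficient registers from csc_c11_c12 to csc_c33_c34,
+ * writing two 16-bit coefficients (regval[2i] and regval[2i + 1]) into each
+ * register along the way.
+ */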
+void cm_helper_program_color_matrices(
+ struct dc_context *ctx,
+ const uint16_t *regval,
+ const struct color_matrices_reg *reg)
+{
+ uint32_t cur_csc_reg;
+ unsigned int i = 0;
+
+ for (cur_csc_reg = reg->csc_c11_c12;
+ cur_csc_reg <= reg->csc_c33_c34;
+ cur_csc_reg++) {
+
+ const uint16_t *regval0 = &(regval[2 * i]);
+ const uint16_t *regval1 = &(regval[(2 * i) + 1]);
+
+ REG_SET_2(cur_csc_reg, 0,
+ csc_c11, *regval0,
+ csc_c12, *regval1);
+
+ i++;
+ }
+}
+
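+/*
+ * Program a piece-wise linear transfer function: per-channel start point,
+ * linear start slope and end point/slope/base, followed by the LUT offset
+ * and segment count for each pair of gamma curve regions.
+ */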
+void cm_helper_program_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct xfer_func_reg *reg)
+{
+ uint32_t reg_region_cur;
+ unsigned int i = 0;
+
+ REG_SET_2(reg->start_cntl_b, 0,
+ exp_region_start, params->arr_points[0].custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_g, 0,
+ exp_region_start, params->arr_points[0].custom_float_x,
+ exp_resion_start_segment, 0);
+ REG_SET_2(reg->start_cntl_r, 0,
+ exp_region_start, params->arr_points[0].custom_float_x,
+ exp_resion_start_segment, 0);
+
+ REG_SET(reg->start_slope_cntl_b, 0,
+ field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ REG_SET(reg->start_slope_cntl_g, 0,
+ field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ REG_SET(reg->start_slope_cntl_r, 0,
+ field_region_linear_slope, params->arr_points[0].custom_float_slope);
+
+ REG_SET(reg->start_end_cntl1_b, 0,
+ field_region_end, params->arr_points[1].custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_b, 0,
+ field_region_end_slope, params->arr_points[1].custom_float_slope,
+ field_region_end_base, params->arr_points[1].custom_float_y);
+
+ REG_SET(reg->start_end_cntl1_g, 0,
+ field_region_end, params->arr_points[1].custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_g, 0,
+ field_region_end_slope, params->arr_points[1].custom_float_slope,
+ field_region_end_base, params->arr_points[1].custom_float_y);
+
+ REG_SET(reg->start_end_cntl1_r, 0,
+ field_region_end, params->arr_points[1].custom_float_x);
+ REG_SET_2(reg->start_end_cntl2_r, 0,
+ field_region_end_slope, params->arr_points[1].custom_float_slope,
+ field_region_end_base, params->arr_points[1].custom_float_y);
+
+ for (reg_region_cur = reg->region_start;
+ reg_region_cur <= reg->region_end;
+ reg_region_cur++) {
+
+ const struct gamma_curve *curve0 = &(params->arr_curve_points[2 * i]);
+ const struct gamma_curve *curve1 = &(params->arr_curve_points[(2 * i) + 1]);
+
+ REG_SET_4(reg_region_cur, 0,
+ exp_region0_lut_offset, curve0->offset,
+ exp_region0_num_segments, curve0->segments_num,
+ exp_region1_lut_offset, curve1->offset,
+ exp_region1_num_segments, curve1->segments_num);
+
+ i++;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
new file mode 100644
index 000000000000..64836dcf21f2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN10_CM_COMMON_H__
+#define __DAL_DCN10_CM_COMMON_H__
+
+#define TF_HELPER_REG_FIELD_LIST(type) \
+ type exp_region0_lut_offset; \
+ type exp_region0_num_segments; \
+ type exp_region1_lut_offset; \
+ type exp_region1_num_segments;\
+ type field_region_end;\
+ type field_region_end_slope;\
+ type field_region_end_base;\
+ type exp_region_start;\
+ type exp_resion_start_segment;\
+ type field_region_linear_slope
+
+#define TF_CM_REG_FIELD_LIST(type) \
+ type csc_c11; \
+ type csc_c12
+
+struct xfer_func_shift {
+ TF_HELPER_REG_FIELD_LIST(uint8_t);
+};
+
+struct xfer_func_mask {
+ TF_HELPER_REG_FIELD_LIST(uint32_t);
+};
+
+struct xfer_func_reg {
+ struct xfer_func_shift shifts;
+ struct xfer_func_mask masks;
+
+ uint32_t start_cntl_b;
+ uint32_t start_cntl_g;
+ uint32_t start_cntl_r;
+ uint32_t start_slope_cntl_b;
+ uint32_t start_slope_cntl_g;
+ uint32_t start_slope_cntl_r;
+ uint32_t start_end_cntl1_b;
+ uint32_t start_end_cntl2_b;
+ uint32_t start_end_cntl1_g;
+ uint32_t start_end_cntl2_g;
+ uint32_t start_end_cntl1_r;
+ uint32_t start_end_cntl2_r;
+ uint32_t region_start;
+ uint32_t region_end;
+};
+
+struct cm_color_matrix_shift {
+ TF_CM_REG_FIELD_LIST(uint8_t);
+};
+
+struct cm_color_matrix_mask {
+ TF_CM_REG_FIELD_LIST(uint32_t);
+};
+
+struct color_matrices_reg {
+ struct cm_color_matrix_shift shifts;
+ struct cm_color_matrix_mask masks;
+
+ uint32_t csc_c11_c12;
+ uint32_t csc_c33_c34;
+};
+
+void cm_helper_program_color_matrices(
+ struct dc_context *ctx,
+ const uint16_t *regval,
+ const struct color_matrices_reg *reg);
+
+void cm_helper_program_xfer_func(
+ struct dc_context *ctx,
+ const struct pwl_params *params,
+ const struct xfer_func_reg *reg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
new file mode 100644
index 000000000000..74e7c82bdc76
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+#include "basics/conversion.h"
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+enum pixel_format_description {
+ PIXEL_FORMAT_FIXED = 0,
+ PIXEL_FORMAT_FIXED16,
+ PIXEL_FORMAT_FLOAT
+};
+
+enum dcn10_coef_filter_type_sel {
+ SCL_COEF_LUMA_VERT_FILTER = 0,
+ SCL_COEF_LUMA_HORZ_FILTER = 1,
+ SCL_COEF_CHROMA_VERT_FILTER = 2,
+ SCL_COEF_CHROMA_HORZ_FILTER = 3,
+ SCL_COEF_ALPHA_VERT_FILTER = 4,
+ SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+ AUTOCAL_MODE_OFF = 0,
+
+	/* Autocal calculates the scaling ratio and initial phase, and
+	 * DSCL_MODE_SEL must be set to 1
+	 */
+	AUTOCAL_MODE_AUTOSCALE = 1,
+	/* Autocal performs auto centering without replication, and
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+	AUTOCAL_MODE_AUTOCENTER = 2,
+	/* Autocal performs auto centering and auto replication, and
+	 * DSCL_MODE_SEL must be set to 0
+	 */
+	AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+ DSCL_MODE_SCALING_444_BYPASS = 0,
+ DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DSCL_MODE_DSCL_BYPASS = 6
+};
+
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
+
+/* Program gamut remap in bypass mode */
+void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp)
+{
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ /* Gamut remap in bypass */
+}
+
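+/*
+ * dal_fixed31_32_u2d19() converts the s31.32 ratio to U2.19 fixed point, so
+ * comparing against (1 << 19) checks for a scaling ratio of exactly 1.0.
+ */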
+#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19))
+
+bool dpp_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ uint32_t pixel_width;
+
+ if (scl_data->viewport.width > scl_data->recout.width)
+ pixel_width = scl_data->recout.width;
+ else
+ pixel_width = scl_data->viewport.width;
+
+ /* TODO: add lb check */
+
+ /* No support for programming ratio of 4, drop to 3.99999.. */
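+	/* the ratios are s31.32 fixed point, so (4ll << 32) is exactly 4.0 */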
+ if (scl_data->ratios.horz.value == (4ll << 32))
+ scl_data->ratios.horz.value--;
+ if (scl_data->ratios.vert.value == (4ll << 32))
+ scl_data->ratios.vert.value--;
+ if (scl_data->ratios.horz_c.value == (4ll << 32))
+ scl_data->ratios.horz_c.value--;
+ if (scl_data->ratios.vert_c.value == (4ll << 32))
+ scl_data->ratios.vert_c.value--;
+
+ /* Set default taps if none are provided */
+ if (in_taps->h_taps == 0)
+ scl_data->taps.h_taps = 4;
+ else
+ scl_data->taps.h_taps = in_taps->h_taps;
+ if (in_taps->v_taps == 0)
+ scl_data->taps.v_taps = 4;
+ else
+ scl_data->taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0)
+ scl_data->taps.v_taps_c = 2;
+ else
+ scl_data->taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0)
+ scl_data->taps.h_taps_c = 2;
+ /* Only 1 and even h_taps_c are supported by hw */
+ else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
+ else
+ scl_data->taps.h_taps_c = in_taps->h_taps_c;
+
+ if (!dpp->ctx->dc->debug.always_scale) {
+ if (IDENTITY_RATIO(scl_data->ratios.horz))
+ scl_data->taps.h_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.v_taps = 1;
+		/*
+		 * The bandwidth spreadsheet doesn't handle taps_c of one
+		 * properly; chroma needs to stay scaled here to pass
+		 * bandwidth validation.
+		 */
+ }
+
+ return true;
+}
+
+void dpp_reset(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ dpp->filter_h_c = NULL;
+ dpp->filter_v_c = NULL;
+ dpp->filter_h = NULL;
+ dpp->filter_v = NULL;
+
+ /* set boundary mode to 0 */
+ REG_SET(DSCL_CONTROL, 0, SCL_BOUNDARY_MODE, 0);
+}
+
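+/*
+ * Write the regamma PWL into whichever LUT RAM (A or B) is currently safe
+ * to program; dpp1_cm_set_regamma_mode() below flips is_write_to_ram_a_safe
+ * when it activates the user LUT, so updates are double buffered.
+ */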
+static void dpp1_cm_set_regamma_pwl(
+ struct dpp *dpp_base, const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ dpp1_cm_power_on_regamma_lut(dpp_base, true);
+ dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe);
+
+ if (dpp->is_write_to_ram_a_safe)
+ dpp1_cm_program_regamma_luta_settings(dpp_base, params);
+ else
+ dpp1_cm_program_regamma_lutb_settings(dpp_base, params);
+
+ dpp1_cm_program_regamma_lut(
+ dpp_base, params->rgb_resulted, params->hw_points_num);
+}
+
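+/*
+ * Map the requested regamma mode onto CM_RGAM_LUT_MODE: 0 bypass, 1 sRGB,
+ * 2 for OPP_REGAMMA_3_6, and 3 or 4 for the user LUT depending on which
+ * RAM (A or B) was last written; the flag is flipped so the next user-mode
+ * update targets the other RAM.
+ */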
+static void dpp1_cm_set_regamma_mode(
+ struct dpp *dpp_base,
+ enum opp_regamma mode)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ uint32_t re_mode = 0;
+	uint32_t obuf_bypass = 0; /* needed for pipe split */
+ uint32_t obuf_hupscale = 0;
+
+ switch (mode) {
+ case OPP_REGAMMA_BYPASS:
+ re_mode = 0;
+ break;
+ case OPP_REGAMMA_SRGB:
+ re_mode = 1;
+ break;
+ case OPP_REGAMMA_3_6:
+ re_mode = 2;
+ break;
+ case OPP_REGAMMA_USER:
+ re_mode = dpp->is_write_to_ram_a_safe ? 3 : 4;
+ dpp->is_write_to_ram_a_safe = !dpp->is_write_to_ram_a_safe;
+ break;
+ default:
+ break;
+ }
+
+ REG_SET(CM_RGAM_CONTROL, 0, CM_RGAM_LUT_MODE, re_mode);
+ REG_UPDATE_2(OBUF_CONTROL,
+ OBUF_BYPASS, obuf_bypass,
+ OBUF_H_2X_UPSCALE_EN, obuf_hupscale);
+}
+
+static void dpp1_setup_format_flags(enum surface_pixel_format input_format,
+	enum pixel_format_description *fmt)
+{
+ if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F ||
+ input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F)
+ *fmt = PIXEL_FORMAT_FLOAT;
+ else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616)
+ *fmt = PIXEL_FORMAT_FIXED16;
+ else
+ *fmt = PIXEL_FORMAT_FIXED;
+}
+
+static void dpp1_set_degamma_format_float(
+ struct dpp *dpp_base,
+ bool is_float)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (is_float) {
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 3);
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 1);
+ } else {
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 2);
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 0);
+ }
+}
+
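+/*
+ * Set up the format converter (CNVC) for the given surface format: pick the
+ * expansion mode and fixed vs. floating-point handling, program the CNVC
+ * pixel format code and alpha enable, select the input CSC (BT.709 for the
+ * YCbCr video formats, bypass otherwise), and force the cursor planes off
+ * for most of the 4:2:0 video formats.
+ */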
+void dpp1_cnv_setup(
+ struct dpp *dpp_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode)
+{
+ uint32_t pixel_format;
+ uint32_t alpha_en;
+	enum pixel_format_description fmt;
+ enum dc_color_space color_space;
+ enum dcn10_input_csc_select select;
+ bool is_float;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ bool force_disable_cursor = false;
+
+ dpp1_setup_format_flags(input_format, &fmt);
+ alpha_en = 1;
+ pixel_format = 0;
+ color_space = COLOR_SPACE_SRGB;
+ select = INPUT_CSC_SELECT_BYPASS;
+ is_float = false;
+
+ switch (fmt) {
+ case PIXEL_FORMAT_FIXED:
+ case PIXEL_FORMAT_FIXED16:
+		/* when output is float, FORMAT_CONTROL__OUTPUT_FP = 1 */
+ REG_SET_3(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode,
+ OUTPUT_FP, 0);
+ break;
+ case PIXEL_FORMAT_FLOAT:
+ REG_SET_3(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode,
+ OUTPUT_FP, 1);
+ is_float = true;
+ break;
+ default:
+ break;
+ }
+
+ dpp1_set_degamma_format_float(dpp_base, is_float);
+
+ switch (input_format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ pixel_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ pixel_format = 3;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ pixel_format = 8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ pixel_format = 10;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ force_disable_cursor = false;
+ pixel_format = 65;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 64;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ force_disable_cursor = true;
+ pixel_format = 67;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 66;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ pixel_format = 22;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ pixel_format = 24;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ pixel_format = 25;
+ break;
+ default:
+ break;
+ }
+ REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
+
+ dpp1_program_input_csc(dpp_base, color_space, select);
+
+ if (force_disable_cursor) {
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, 0);
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, 0);
+ }
+}
+
+void dpp1_set_cursor_attributes(
+ struct dpp *dpp_base,
+ const struct dc_cursor_attributes *attr)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ enum dc_cursor_color_format color_format = attr->color_format;
+
+ REG_UPDATE_2(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0);
+
+ if (color_format == CURSOR_MODE_MONO) {
+ /* todo: clarify what to program these to */
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
+
+ /* TODO: Fixed vs float */
+
+ REG_UPDATE_3(FORMAT_CONTROL,
+ CNVC_BYPASS, 0,
+ FORMAT_CONTROL__ALPHA_EN, 1,
+ FORMAT_EXPANSION_MODE, 0);
+}
+
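+/*
+ * Hide the cursor when it falls entirely outside the viewport:
+ * src_x_offset is the cursor's left edge in viewport space, so the cursor
+ * is invisible once that edge reaches the right side of the viewport, or
+ * while left edge + width is still to the left of x = 0.
+ */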
+void dpp1_set_cursor_position(
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+ uint32_t width)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+
+ if (src_x_offset >= (int)param->viewport_width)
+		cur_en = 0; /* not visible beyond right edge */
+
+ if (src_x_offset + (int)width < 0)
+		cur_en = 0; /* not visible beyond left edge */
+
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, cur_en);
+}
+
+static const struct dpp_funcs dcn10_dpp_funcs = {
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .opp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment,
+ .opp_set_csc_default = dpp1_cm_set_output_csc_default,
+ .opp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut,
+ .opp_program_regamma_lut = dpp1_cm_program_regamma_lut,
+ .opp_configure_regamma_lut = dpp1_cm_configure_regamma_lut,
+ .opp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings,
+ .opp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings,
+ .opp_program_regamma_pwl = dpp1_cm_set_regamma_pwl,
+ .opp_set_regamma_mode = dpp1_cm_set_regamma_mode,
+ .ipp_set_degamma = dpp1_set_degamma,
+ .ipp_program_input_lut = dpp1_program_input_lut,
+ .ipp_program_degamma_pwl = dpp1_set_degamma_pwl,
+ .ipp_setup = dpp1_cnv_setup,
+ .ipp_full_bypass = dpp1_full_bypass,
+ .set_cursor_attributes = dpp1_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+};
+
+static struct dpp_caps dcn10_dpp_cap = {
+ .dscl_data_proc_format = DSCL_DATA_PRCESSING_FIXED_FORMAT,
+ .dscl_calc_lb_num_partitions = dpp1_dscl_calc_lb_num_partitions,
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dpp1_construct(
+ struct dcn10_dpp *dpp,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_dpp_registers *tf_regs,
+ const struct dcn_dpp_shift *tf_shift,
+ const struct dcn_dpp_mask *tf_mask)
+{
+ dpp->base.ctx = ctx;
+
+ dpp->base.inst = inst;
+ dpp->base.funcs = &dcn10_dpp_funcs;
+ dpp->base.caps = &dcn10_dpp_cap;
+
+ dpp->tf_regs = tf_regs;
+ dpp->tf_shift = tf_shift;
+ dpp->tf_mask = tf_mask;
+
+ dpp->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+	dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /* 0x1404 */
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
new file mode 100644
index 000000000000..a9782b1aba47
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h
@@ -0,0 +1,1386 @@
+/* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DPP_DCN10_H__
+#define __DAL_DPP_DCN10_H__
+
+#include "dpp.h"
+
+#define TO_DCN10_DPP(dpp)\
+ container_of(dpp, struct dcn10_dpp, base)
+
+/* TODO: Use correct number of taps. Using polaris values for now */
+#define LB_TOTAL_NUMBER_OF_ENTRIES 5124
+#define LB_BITS_PER_ENTRY 144
+
+#define TF_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
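+
+/*
+ * e.g. with mask_sh = __SHIFT, TF_SF(CM0_CM_ICSC_CONTROL, CM_ICSC_MODE, __SHIFT)
+ * expands to .CM_ICSC_MODE = CM0_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT
+ */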
+
+/* Used to resolve a corner case */
+#define TF2_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## _ ## field_name ## post_fix
+
+#define TF_REG_LIST_DCN(id) \
+ SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\
+ SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\
+ SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\
+ SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \
+ SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \
+ SRI(OTG_H_BLANK, DSCL, id), \
+ SRI(OTG_V_BLANK, DSCL, id), \
+ SRI(SCL_MODE, DSCL, id), \
+ SRI(LB_DATA_FORMAT, DSCL, id), \
+ SRI(LB_MEMORY_CTRL, DSCL, id), \
+ SRI(DSCL_AUTOCAL, DSCL, id), \
+ SRI(SCL_BLACK_OFFSET, DSCL, id), \
+ SRI(DSCL_CONTROL, DSCL, id), \
+ SRI(SCL_TAP_CONTROL, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \
+ SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \
+ SRI(DSCL_2TAP_CONTROL, DSCL, id), \
+ SRI(MPC_SIZE, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_BOT, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \
+ SRI(SCL_VERT_FILTER_INIT_BOT_C, DSCL, id), \
+ SRI(RECOUT_START, DSCL, id), \
+ SRI(RECOUT_SIZE, DSCL, id), \
+ SRI(OBUF_CONTROL, DSCL, id), \
+ SRI(CM_ICSC_CONTROL, CM, id), \
+ SRI(CM_ICSC_C11_C12, CM, id), \
+ SRI(CM_ICSC_C33_C34, CM, id), \
+ SRI(CM_DGAM_RAMB_START_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMB_START_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMB_START_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL1_B, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL2_B, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL1_G, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL2_G, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL1_R, CM, id), \
+ SRI(CM_DGAM_RAMB_END_CNTL2_R, CM, id), \
+ SRI(CM_DGAM_RAMB_REGION_0_1, CM, id), \
+ SRI(CM_DGAM_RAMB_REGION_14_15, CM, id), \
+ SRI(CM_DGAM_RAMA_START_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMA_START_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMA_START_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_DGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_DGAM_RAMA_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL1_B, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL2_B, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL1_G, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL2_G, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL1_R, CM, id), \
+ SRI(CM_DGAM_RAMA_END_CNTL2_R, CM, id), \
+ SRI(CM_DGAM_RAMA_REGION_0_1, CM, id), \
+ SRI(CM_DGAM_RAMA_REGION_14_15, CM, id), \
+ SRI(CM_MEM_PWR_CTRL, CM, id), \
+ SRI(CM_DGAM_LUT_WRITE_EN_MASK, CM, id), \
+ SRI(CM_DGAM_LUT_INDEX, CM, id), \
+ SRI(CM_DGAM_LUT_DATA, CM, id), \
+ SRI(CM_CONTROL, CM, id), \
+ SRI(CM_DGAM_CONTROL, CM, id), \
+ SRI(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id)
+
+
+
+#define TF_REG_LIST_DCN10(id) \
+ TF_REG_LIST_DCN(id), \
+ SRI(CM_COMA_C11_C12, CM, id),\
+ SRI(CM_COMA_C33_C34, CM, id),\
+ SRI(CM_COMB_C11_C12, CM, id),\
+ SRI(CM_COMB_C33_C34, CM, id),\
+ SRI(CM_OCSC_CONTROL, CM, id), \
+ SRI(CM_OCSC_C11_C12, CM, id), \
+ SRI(CM_OCSC_C33_C34, CM, id), \
+ SRI(CM_MEM_PWR_CTRL, CM, id), \
+ SRI(CM_RGAM_LUT_DATA, CM, id), \
+ SRI(CM_RGAM_LUT_WRITE_EN_MASK, CM, id),\
+ SRI(CM_RGAM_LUT_INDEX, CM, id), \
+ SRI(CM_RGAM_RAMB_START_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMB_START_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMB_START_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMB_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMB_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMB_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL1_B, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL2_B, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL1_G, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL2_G, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL1_R, CM, id), \
+ SRI(CM_RGAM_RAMB_END_CNTL2_R, CM, id), \
+ SRI(CM_RGAM_RAMB_REGION_0_1, CM, id), \
+ SRI(CM_RGAM_RAMB_REGION_32_33, CM, id), \
+ SRI(CM_RGAM_RAMA_START_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMA_START_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMA_START_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMA_SLOPE_CNTL_B, CM, id), \
+ SRI(CM_RGAM_RAMA_SLOPE_CNTL_G, CM, id), \
+ SRI(CM_RGAM_RAMA_SLOPE_CNTL_R, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL1_B, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL2_B, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL1_G, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL2_G, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL1_R, CM, id), \
+ SRI(CM_RGAM_RAMA_END_CNTL2_R, CM, id), \
+ SRI(CM_RGAM_RAMA_REGION_0_1, CM, id), \
+ SRI(CM_RGAM_RAMA_REGION_32_33, CM, id), \
+ SRI(CM_RGAM_CONTROL, CM, id), \
+ SRI(CM_IGAM_CONTROL, CM, id), \
+ SRI(CM_IGAM_LUT_RW_CONTROL, CM, id), \
+ SRI(CM_IGAM_LUT_RW_INDEX, CM, id), \
+ SRI(CM_IGAM_LUT_SEQ_COLOR, CM, id), \
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CM_CMOUT_CONTROL, CM, id)
+
+
+#define TF_REG_LIST_SH_MASK_DCN(mask_sh)\
+ TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\
+ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\
+ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\
+ TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\
+ TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\
+ TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\
+ TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\
+ TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\
+ TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\
+ TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\
+ TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\
+ TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\
+ TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\
+ TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\
+ TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_FRAC_BOT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_INT_BOT, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_FRAC_BOT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_INT_BOT_C, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\
+ TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \
+ TF_SF(DSCL0_OBUF_CONTROL, OBUF_BYPASS, mask_sh), \
+ TF_SF(CM0_CM_ICSC_CONTROL, CM_ICSC_MODE, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C11, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C12, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C33, mask_sh), \
+ TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C34, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_B, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_G, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_R, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_B, CM_DGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_G, CM_DGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_R, CM_DGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_B, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_G, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_R, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_B, CM_DGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_G, CM_DGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_R, CM_DGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \
+ TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
+ TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
+ TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh)
+
+#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN(mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_REDUCE_MODE, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, DYNAMIC_PIXEL_DEPTH, mask_sh),\
+ TF_SF(DSCL0_LB_DATA_FORMAT, DITHER_EN, mask_sh),\
+ TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C11, mask_sh),\
+ TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C12, mask_sh),\
+ TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C33, mask_sh),\
+ TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C34, mask_sh),\
+ TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C11, mask_sh),\
+ TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C12, mask_sh),\
+ TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C33, mask_sh),\
+ TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C34, mask_sh),\
+ TF_SF(CM0_CM_OCSC_CONTROL, CM_OCSC_MODE, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C11, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C12, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C33, mask_sh), \
+ TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C34, mask_sh), \
+ TF_SF(CM0_CM_MEM_PWR_CTRL, RGAM_MEM_PWR_FORCE, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_DATA, CM_RGAM_LUT_DATA, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_SEL, mask_sh), \
+ TF_SF(CM0_CM_RGAM_LUT_INDEX, CM_RGAM_LUT_INDEX, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_B, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_G, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_R, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_B, CM_RGAM_RAMB_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_G, CM_RGAM_RAMB_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_R, CM_RGAM_RAMB_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_B, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_G, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_R, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_B, CM_RGAM_RAMA_EXP_REGION_END_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_G, CM_RGAM_RAMA_EXP_REGION_END_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_R, CM_RGAM_RAMA_EXP_REGION_END_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \
+ TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \
+ TF_SF(CM0_CM_RGAM_CONTROL, CM_RGAM_LUT_MODE, mask_sh), \
+ TF_SF(DSCL0_OBUF_CONTROL, OBUF_H_2X_UPSCALE_EN, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_R, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_G, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_B, mask_sh), \
+ TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, mask_sh), \
+ TF_SF(CM0_CM_CONTROL, CM_BYPASS_EN, mask_sh), \
+ TF_SF(CM0_CM_IGAM_LUT_SEQ_COLOR, CM_IGAM_LUT_SEQ_COLOR, mask_sh), \
+ TF_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh), \
+ TF_SF(CM0_CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh)
+
+#define TF_REG_FIELD_LIST(type) \
+ type EXT_OVERSCAN_LEFT; \
+ type EXT_OVERSCAN_RIGHT; \
+ type EXT_OVERSCAN_BOTTOM; \
+ type EXT_OVERSCAN_TOP; \
+ type OTG_H_BLANK_START; \
+ type OTG_H_BLANK_END; \
+ type OTG_V_BLANK_START; \
+ type OTG_V_BLANK_END; \
+ type PIXEL_DEPTH; \
+ type PIXEL_EXPAN_MODE; \
+ type PIXEL_REDUCE_MODE; \
+ type DYNAMIC_PIXEL_DEPTH; \
+ type DITHER_EN; \
+ type INTERLEAVE_EN; \
+ type LB_DATA_FORMAT__ALPHA_EN; \
+ type MEMORY_CONFIG; \
+ type LB_MAX_PARTITIONS; \
+ type AUTOCAL_MODE; \
+ type AUTOCAL_NUM_PIPE; \
+ type AUTOCAL_PIPE_ID; \
+ type SCL_BLACK_OFFSET_RGB_Y; \
+ type SCL_BLACK_OFFSET_CBCR; \
+ type SCL_BOUNDARY_MODE; \
+ type SCL_V_NUM_TAPS; \
+ type SCL_H_NUM_TAPS; \
+ type SCL_V_NUM_TAPS_C; \
+ type SCL_H_NUM_TAPS_C; \
+ type SCL_COEF_RAM_TAP_PAIR_IDX; \
+ type SCL_COEF_RAM_PHASE; \
+ type SCL_COEF_RAM_FILTER_TYPE; \
+ type SCL_COEF_RAM_EVEN_TAP_COEF; \
+ type SCL_COEF_RAM_EVEN_TAP_COEF_EN; \
+ type SCL_COEF_RAM_ODD_TAP_COEF; \
+ type SCL_COEF_RAM_ODD_TAP_COEF_EN; \
+ type SCL_H_2TAP_HARDCODE_COEF_EN; \
+ type SCL_H_2TAP_SHARP_EN; \
+ type SCL_H_2TAP_SHARP_FACTOR; \
+ type SCL_V_2TAP_HARDCODE_COEF_EN; \
+ type SCL_V_2TAP_SHARP_EN; \
+ type SCL_V_2TAP_SHARP_FACTOR; \
+ type SCL_COEF_RAM_SELECT; \
+ type DSCL_MODE; \
+ type RECOUT_START_X; \
+ type RECOUT_START_Y; \
+ type RECOUT_WIDTH; \
+ type RECOUT_HEIGHT; \
+ type MPC_WIDTH; \
+ type MPC_HEIGHT; \
+ type SCL_H_SCALE_RATIO; \
+ type SCL_V_SCALE_RATIO; \
+ type SCL_H_SCALE_RATIO_C; \
+ type SCL_V_SCALE_RATIO_C; \
+ type SCL_H_INIT_FRAC; \
+ type SCL_H_INIT_INT; \
+ type SCL_H_INIT_FRAC_C; \
+ type SCL_H_INIT_INT_C; \
+ type SCL_V_INIT_FRAC; \
+ type SCL_V_INIT_INT; \
+ type SCL_V_INIT_FRAC_BOT; \
+ type SCL_V_INIT_INT_BOT; \
+ type SCL_V_INIT_FRAC_C; \
+ type SCL_V_INIT_INT_C; \
+ type SCL_V_INIT_FRAC_BOT_C; \
+ type SCL_V_INIT_INT_BOT_C; \
+ type SCL_CHROMA_COEF_MODE; \
+ type SCL_COEF_RAM_SELECT_CURRENT; \
+ type CM_GAMUT_REMAP_MODE; \
+ type CM_GAMUT_REMAP_C11; \
+ type CM_GAMUT_REMAP_C12; \
+ type CM_GAMUT_REMAP_C33; \
+ type CM_GAMUT_REMAP_C34; \
+ type CM_COMA_C11; \
+ type CM_COMA_C12; \
+ type CM_COMA_C33; \
+ type CM_COMA_C34; \
+ type CM_COMB_C11; \
+ type CM_COMB_C12; \
+ type CM_COMB_C33; \
+ type CM_COMB_C34; \
+ type CM_OCSC_MODE; \
+ type CM_OCSC_C11; \
+ type CM_OCSC_C12; \
+ type CM_OCSC_C33; \
+ type CM_OCSC_C34; \
+ type RGAM_MEM_PWR_FORCE; \
+ type CM_RGAM_LUT_DATA; \
+ type CM_RGAM_LUT_WRITE_EN_MASK; \
+ type CM_RGAM_LUT_WRITE_SEL; \
+ type CM_RGAM_LUT_INDEX; \
+ type CM_RGAM_RAMB_EXP_REGION_START_B; \
+ type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_RGAM_RAMB_EXP_REGION_START_G; \
+ type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_RGAM_RAMB_EXP_REGION_START_R; \
+ type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_RGAM_RAMB_EXP_REGION_END_B; \
+ type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; \
+ type CM_RGAM_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_RGAM_RAMB_EXP_REGION_END_G; \
+ type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G; \
+ type CM_RGAM_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_RGAM_RAMB_EXP_REGION_END_R; \
+ type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R; \
+ type CM_RGAM_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET; \
+ type CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION_START_B; \
+ type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_RGAM_RAMA_EXP_REGION_START_G; \
+ type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_RGAM_RAMA_EXP_REGION_START_R; \
+ type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_RGAM_RAMA_EXP_REGION_END_B; \
+ type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_RGAM_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_RGAM_RAMA_EXP_REGION_END_G; \
+ type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G; \
+ type CM_RGAM_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_RGAM_RAMA_EXP_REGION_END_R; \
+ type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R; \
+ type CM_RGAM_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET; \
+ type CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_RGAM_LUT_MODE; \
+ type CM_CMOUT_ROUND_TRUNC_MODE; \
+ type OBUF_BYPASS; \
+ type OBUF_H_2X_UPSCALE_EN; \
+ type CM_BLNDGAM_LUT_MODE; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET; \
+ type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_BLNDGAM_LUT_WRITE_EN_MASK; \
+ type CM_BLNDGAM_LUT_WRITE_SEL; \
+ type CM_BLNDGAM_LUT_INDEX; \
+ type CM_BLNDGAM_LUT_DATA; \
+ type CM_3DLUT_MODE; \
+ type CM_3DLUT_SIZE; \
+ type CM_3DLUT_INDEX; \
+ type CM_3DLUT_DATA0; \
+ type CM_3DLUT_DATA1; \
+ type CM_3DLUT_DATA_30BIT; \
+ type CM_3DLUT_WRITE_EN_MASK; \
+ type CM_3DLUT_RAM_SEL; \
+ type CM_3DLUT_30BIT_EN; \
+ type CM_3DLUT_CONFIG_STATUS; \
+ type CM_3DLUT_READ_SEL; \
+ type CM_SHAPER_LUT_MODE; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_R; \
+ type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_R; \
+ type CM_SHAPER_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET; \
+ type CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_R; \
+ type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_R; \
+ type CM_SHAPER_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS; \
+ type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \
+ type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \
+ type CM_SHAPER_LUT_WRITE_EN_MASK; \
+ type CM_SHAPER_LUT_WRITE_SEL; \
+ type CM_SHAPER_LUT_INDEX; \
+ type CM_SHAPER_LUT_DATA; \
+ type CM_DGAM_CONFIG_STATUS; \
+ type CM_ICSC_MODE; \
+ type CM_ICSC_C11; \
+ type CM_ICSC_C12; \
+ type CM_ICSC_C33; \
+ type CM_ICSC_C34; \
+ type CM_DGAM_RAMB_EXP_REGION_START_B; \
+ type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; \
+ type CM_DGAM_RAMB_EXP_REGION_START_G; \
+ type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G; \
+ type CM_DGAM_RAMB_EXP_REGION_START_R; \
+ type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R; \
+ type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_DGAM_RAMB_EXP_REGION_END_B; \
+ type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; \
+ type CM_DGAM_RAMB_EXP_REGION_END_BASE_B; \
+ type CM_DGAM_RAMB_EXP_REGION_END_G; \
+ type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G; \
+ type CM_DGAM_RAMB_EXP_REGION_END_BASE_G; \
+ type CM_DGAM_RAMB_EXP_REGION_END_R; \
+ type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R; \
+ type CM_DGAM_RAMB_EXP_REGION_END_BASE_R; \
+ type CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET; \
+ type CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION_START_B; \
+ type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B; \
+ type CM_DGAM_RAMA_EXP_REGION_START_G; \
+ type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G; \
+ type CM_DGAM_RAMA_EXP_REGION_START_R; \
+ type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R; \
+ type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \
+ type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \
+ type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \
+ type CM_DGAM_RAMA_EXP_REGION_END_B; \
+ type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B; \
+ type CM_DGAM_RAMA_EXP_REGION_END_BASE_B; \
+ type CM_DGAM_RAMA_EXP_REGION_END_G; \
+ type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G; \
+ type CM_DGAM_RAMA_EXP_REGION_END_BASE_G; \
+ type CM_DGAM_RAMA_EXP_REGION_END_R; \
+ type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R; \
+ type CM_DGAM_RAMA_EXP_REGION_END_BASE_R; \
+ type CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \
+ type CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET; \
+ type CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \
+ type SHARED_MEM_PWR_DIS; \
+ type CM_IGAM_LUT_FORMAT_R; \
+ type CM_IGAM_LUT_FORMAT_G; \
+ type CM_IGAM_LUT_FORMAT_B; \
+ type CM_IGAM_LUT_HOST_EN; \
+ type CM_IGAM_LUT_RW_MODE; \
+ type CM_IGAM_LUT_WRITE_EN_MASK; \
+ type CM_IGAM_LUT_SEL; \
+ type CM_IGAM_LUT_SEQ_COLOR; \
+ type CM_IGAM_DGAM_CONFIG_STATUS; \
+ type CM_DGAM_LUT_WRITE_EN_MASK; \
+ type CM_DGAM_LUT_WRITE_SEL; \
+ type CM_DGAM_LUT_INDEX; \
+ type CM_DGAM_LUT_DATA; \
+ type CM_DGAM_LUT_MODE; \
+ type CM_IGAM_LUT_MODE; \
+ type CM_IGAM_INPUT_FORMAT; \
+ type CM_IGAM_LUT_RW_INDEX; \
+ type CM_BYPASS_EN; \
+ type FORMAT_EXPANSION_MODE; \
+ type CNVC_BYPASS; \
+ type OUTPUT_FP; \
+ type CNVC_SURFACE_PIXEL_FORMAT; \
+ type CURSOR_MODE; \
+ type CURSOR_PITCH; \
+ type CURSOR_LINES_PER_CHUNK; \
+ type CURSOR_ENABLE; \
+ type CUR0_MODE; \
+ type CUR0_EXPANSION_MODE; \
+ type CUR0_ENABLE; \
+ type CM_BYPASS; \
+ type FORMAT_CONTROL__ALPHA_EN; \
+ type CUR0_COLOR0; \
+ type CUR0_COLOR1
+
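+/*
+ * TF_REG_FIELD_LIST is an X-macro style field list: instantiating it with
+ * uint8_t yields the per-field bit-shift table and instantiating it with
+ * uint32_t yields the matching bit-mask table, so both structs below always
+ * declare the same set of field names.
+ */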
+struct dcn_dpp_shift {
+ TF_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn_dpp_mask {
+ TF_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn_dpp_registers {
+ uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT;
+ uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM;
+ uint32_t OTG_H_BLANK;
+ uint32_t OTG_V_BLANK;
+ uint32_t SCL_MODE;
+ uint32_t LB_DATA_FORMAT;
+ uint32_t LB_MEMORY_CTRL;
+ uint32_t DSCL_AUTOCAL;
+ uint32_t SCL_BLACK_OFFSET;
+ uint32_t DSCL_CONTROL;
+ uint32_t SCL_TAP_CONTROL;
+ uint32_t SCL_COEF_RAM_TAP_SELECT;
+ uint32_t SCL_COEF_RAM_TAP_DATA;
+ uint32_t DSCL_2TAP_CONTROL;
+ uint32_t MPC_SIZE;
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO;
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO;
+ uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C;
+ uint32_t SCL_VERT_FILTER_SCALE_RATIO_C;
+ uint32_t SCL_HORZ_FILTER_INIT;
+ uint32_t SCL_HORZ_FILTER_INIT_C;
+ uint32_t SCL_VERT_FILTER_INIT;
+ uint32_t SCL_VERT_FILTER_INIT_BOT;
+ uint32_t SCL_VERT_FILTER_INIT_C;
+ uint32_t SCL_VERT_FILTER_INIT_BOT_C;
+ uint32_t RECOUT_START;
+ uint32_t RECOUT_SIZE;
+ uint32_t CM_GAMUT_REMAP_CONTROL;
+ uint32_t CM_GAMUT_REMAP_C11_C12;
+ uint32_t CM_GAMUT_REMAP_C33_C34;
+ uint32_t CM_COMA_C11_C12;
+ uint32_t CM_COMA_C33_C34;
+ uint32_t CM_COMB_C11_C12;
+ uint32_t CM_COMB_C33_C34;
+ uint32_t CM_OCSC_CONTROL;
+ uint32_t CM_OCSC_C11_C12;
+ uint32_t CM_OCSC_C33_C34;
+ uint32_t CM_MEM_PWR_CTRL;
+ uint32_t CM_RGAM_LUT_DATA;
+ uint32_t CM_RGAM_LUT_WRITE_EN_MASK;
+ uint32_t CM_RGAM_LUT_INDEX;
+ uint32_t CM_RGAM_RAMB_START_CNTL_B;
+ uint32_t CM_RGAM_RAMB_START_CNTL_G;
+ uint32_t CM_RGAM_RAMB_START_CNTL_R;
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B;
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G;
+ uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R;
+ uint32_t CM_RGAM_RAMB_END_CNTL1_B;
+ uint32_t CM_RGAM_RAMB_END_CNTL2_B;
+ uint32_t CM_RGAM_RAMB_END_CNTL1_G;
+ uint32_t CM_RGAM_RAMB_END_CNTL2_G;
+ uint32_t CM_RGAM_RAMB_END_CNTL1_R;
+ uint32_t CM_RGAM_RAMB_END_CNTL2_R;
+ uint32_t CM_RGAM_RAMB_REGION_0_1;
+ uint32_t CM_RGAM_RAMB_REGION_32_33;
+ uint32_t CM_RGAM_RAMA_START_CNTL_B;
+ uint32_t CM_RGAM_RAMA_START_CNTL_G;
+ uint32_t CM_RGAM_RAMA_START_CNTL_R;
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B;
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G;
+ uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R;
+ uint32_t CM_RGAM_RAMA_END_CNTL1_B;
+ uint32_t CM_RGAM_RAMA_END_CNTL2_B;
+ uint32_t CM_RGAM_RAMA_END_CNTL1_G;
+ uint32_t CM_RGAM_RAMA_END_CNTL2_G;
+ uint32_t CM_RGAM_RAMA_END_CNTL1_R;
+ uint32_t CM_RGAM_RAMA_END_CNTL2_R;
+ uint32_t CM_RGAM_RAMA_REGION_0_1;
+ uint32_t CM_RGAM_RAMA_REGION_32_33;
+ uint32_t CM_RGAM_CONTROL;
+ uint32_t CM_CMOUT_CONTROL;
+ uint32_t OBUF_CONTROL;
+ uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK;
+ uint32_t CM_BLNDGAM_CONTROL;
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMB_START_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R;
+ uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R;
+ uint32_t CM_BLNDGAM_RAMB_REGION_0_1;
+ uint32_t CM_BLNDGAM_RAMB_REGION_2_3;
+ uint32_t CM_BLNDGAM_RAMB_REGION_4_5;
+ uint32_t CM_BLNDGAM_RAMB_REGION_6_7;
+ uint32_t CM_BLNDGAM_RAMB_REGION_8_9;
+ uint32_t CM_BLNDGAM_RAMB_REGION_10_11;
+ uint32_t CM_BLNDGAM_RAMB_REGION_12_13;
+ uint32_t CM_BLNDGAM_RAMB_REGION_14_15;
+ uint32_t CM_BLNDGAM_RAMB_REGION_16_17;
+ uint32_t CM_BLNDGAM_RAMB_REGION_18_19;
+ uint32_t CM_BLNDGAM_RAMB_REGION_20_21;
+ uint32_t CM_BLNDGAM_RAMB_REGION_22_23;
+ uint32_t CM_BLNDGAM_RAMB_REGION_24_25;
+ uint32_t CM_BLNDGAM_RAMB_REGION_26_27;
+ uint32_t CM_BLNDGAM_RAMB_REGION_28_29;
+ uint32_t CM_BLNDGAM_RAMB_REGION_30_31;
+ uint32_t CM_BLNDGAM_RAMB_REGION_32_33;
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMA_START_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B;
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G;
+ uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R;
+ uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R;
+ uint32_t CM_BLNDGAM_RAMA_REGION_0_1;
+ uint32_t CM_BLNDGAM_RAMA_REGION_2_3;
+ uint32_t CM_BLNDGAM_RAMA_REGION_4_5;
+ uint32_t CM_BLNDGAM_RAMA_REGION_6_7;
+ uint32_t CM_BLNDGAM_RAMA_REGION_8_9;
+ uint32_t CM_BLNDGAM_RAMA_REGION_10_11;
+ uint32_t CM_BLNDGAM_RAMA_REGION_12_13;
+ uint32_t CM_BLNDGAM_RAMA_REGION_14_15;
+ uint32_t CM_BLNDGAM_RAMA_REGION_16_17;
+ uint32_t CM_BLNDGAM_RAMA_REGION_18_19;
+ uint32_t CM_BLNDGAM_RAMA_REGION_20_21;
+ uint32_t CM_BLNDGAM_RAMA_REGION_22_23;
+ uint32_t CM_BLNDGAM_RAMA_REGION_24_25;
+ uint32_t CM_BLNDGAM_RAMA_REGION_26_27;
+ uint32_t CM_BLNDGAM_RAMA_REGION_28_29;
+ uint32_t CM_BLNDGAM_RAMA_REGION_30_31;
+ uint32_t CM_BLNDGAM_RAMA_REGION_32_33;
+ uint32_t CM_BLNDGAM_LUT_INDEX;
+ uint32_t CM_BLNDGAM_LUT_DATA;
+ uint32_t CM_3DLUT_MODE;
+ uint32_t CM_3DLUT_INDEX;
+ uint32_t CM_3DLUT_DATA;
+ uint32_t CM_3DLUT_DATA_30BIT;
+ uint32_t CM_3DLUT_READ_WRITE_CONTROL;
+ uint32_t CM_SHAPER_LUT_WRITE_EN_MASK;
+ uint32_t CM_SHAPER_CONTROL;
+ uint32_t CM_SHAPER_RAMB_START_CNTL_B;
+ uint32_t CM_SHAPER_RAMB_START_CNTL_G;
+ uint32_t CM_SHAPER_RAMB_START_CNTL_R;
+ uint32_t CM_SHAPER_RAMB_END_CNTL_B;
+ uint32_t CM_SHAPER_RAMB_END_CNTL_G;
+ uint32_t CM_SHAPER_RAMB_END_CNTL_R;
+ uint32_t CM_SHAPER_RAMB_REGION_0_1;
+ uint32_t CM_SHAPER_RAMB_REGION_2_3;
+ uint32_t CM_SHAPER_RAMB_REGION_4_5;
+ uint32_t CM_SHAPER_RAMB_REGION_6_7;
+ uint32_t CM_SHAPER_RAMB_REGION_8_9;
+ uint32_t CM_SHAPER_RAMB_REGION_10_11;
+ uint32_t CM_SHAPER_RAMB_REGION_12_13;
+ uint32_t CM_SHAPER_RAMB_REGION_14_15;
+ uint32_t CM_SHAPER_RAMB_REGION_16_17;
+ uint32_t CM_SHAPER_RAMB_REGION_18_19;
+ uint32_t CM_SHAPER_RAMB_REGION_20_21;
+ uint32_t CM_SHAPER_RAMB_REGION_22_23;
+ uint32_t CM_SHAPER_RAMB_REGION_24_25;
+ uint32_t CM_SHAPER_RAMB_REGION_26_27;
+ uint32_t CM_SHAPER_RAMB_REGION_28_29;
+ uint32_t CM_SHAPER_RAMB_REGION_30_31;
+ uint32_t CM_SHAPER_RAMB_REGION_32_33;
+ uint32_t CM_SHAPER_RAMA_START_CNTL_B;
+ uint32_t CM_SHAPER_RAMA_START_CNTL_G;
+ uint32_t CM_SHAPER_RAMA_START_CNTL_R;
+ uint32_t CM_SHAPER_RAMA_END_CNTL_B;
+ uint32_t CM_SHAPER_RAMA_END_CNTL_G;
+ uint32_t CM_SHAPER_RAMA_END_CNTL_R;
+ uint32_t CM_SHAPER_RAMA_REGION_0_1;
+ uint32_t CM_SHAPER_RAMA_REGION_2_3;
+ uint32_t CM_SHAPER_RAMA_REGION_4_5;
+ uint32_t CM_SHAPER_RAMA_REGION_6_7;
+ uint32_t CM_SHAPER_RAMA_REGION_8_9;
+ uint32_t CM_SHAPER_RAMA_REGION_10_11;
+ uint32_t CM_SHAPER_RAMA_REGION_12_13;
+ uint32_t CM_SHAPER_RAMA_REGION_14_15;
+ uint32_t CM_SHAPER_RAMA_REGION_16_17;
+ uint32_t CM_SHAPER_RAMA_REGION_18_19;
+ uint32_t CM_SHAPER_RAMA_REGION_20_21;
+ uint32_t CM_SHAPER_RAMA_REGION_22_23;
+ uint32_t CM_SHAPER_RAMA_REGION_24_25;
+ uint32_t CM_SHAPER_RAMA_REGION_26_27;
+ uint32_t CM_SHAPER_RAMA_REGION_28_29;
+ uint32_t CM_SHAPER_RAMA_REGION_30_31;
+ uint32_t CM_SHAPER_RAMA_REGION_32_33;
+ uint32_t CM_SHAPER_LUT_INDEX;
+ uint32_t CM_SHAPER_LUT_DATA;
+ uint32_t CM_ICSC_CONTROL;
+ uint32_t CM_ICSC_C11_C12;
+ uint32_t CM_ICSC_C33_C34;
+ uint32_t CM_DGAM_RAMB_START_CNTL_B;
+ uint32_t CM_DGAM_RAMB_START_CNTL_G;
+ uint32_t CM_DGAM_RAMB_START_CNTL_R;
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B;
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G;
+ uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R;
+ uint32_t CM_DGAM_RAMB_END_CNTL1_B;
+ uint32_t CM_DGAM_RAMB_END_CNTL2_B;
+ uint32_t CM_DGAM_RAMB_END_CNTL1_G;
+ uint32_t CM_DGAM_RAMB_END_CNTL2_G;
+ uint32_t CM_DGAM_RAMB_END_CNTL1_R;
+ uint32_t CM_DGAM_RAMB_END_CNTL2_R;
+ uint32_t CM_DGAM_RAMB_REGION_0_1;
+ uint32_t CM_DGAM_RAMB_REGION_14_15;
+ uint32_t CM_DGAM_RAMA_START_CNTL_B;
+ uint32_t CM_DGAM_RAMA_START_CNTL_G;
+ uint32_t CM_DGAM_RAMA_START_CNTL_R;
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B;
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G;
+ uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R;
+ uint32_t CM_DGAM_RAMA_END_CNTL1_B;
+ uint32_t CM_DGAM_RAMA_END_CNTL2_B;
+ uint32_t CM_DGAM_RAMA_END_CNTL1_G;
+ uint32_t CM_DGAM_RAMA_END_CNTL2_G;
+ uint32_t CM_DGAM_RAMA_END_CNTL1_R;
+ uint32_t CM_DGAM_RAMA_END_CNTL2_R;
+ uint32_t CM_DGAM_RAMA_REGION_0_1;
+ uint32_t CM_DGAM_RAMA_REGION_14_15;
+ uint32_t CM_DGAM_LUT_WRITE_EN_MASK;
+ uint32_t CM_DGAM_LUT_INDEX;
+ uint32_t CM_DGAM_LUT_DATA;
+ uint32_t CM_CONTROL;
+ uint32_t CM_DGAM_CONTROL;
+ uint32_t CM_IGAM_CONTROL;
+ uint32_t CM_IGAM_LUT_RW_CONTROL;
+ uint32_t CM_IGAM_LUT_RW_INDEX;
+ uint32_t CM_IGAM_LUT_SEQ_COLOR;
+ uint32_t FORMAT_CONTROL;
+ uint32_t CNVC_SURFACE_PIXEL_FORMAT;
+ uint32_t CURSOR_CONTROL;
+ uint32_t CURSOR0_CONTROL;
+ uint32_t CURSOR0_COLOR0;
+ uint32_t CURSOR0_COLOR1;
+};
+
+struct dcn10_dpp {
+ struct dpp base;
+
+ const struct dcn_dpp_registers *tf_regs;
+ const struct dcn_dpp_shift *tf_shift;
+ const struct dcn_dpp_mask *tf_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool is_write_to_ram_a_safe;
+};
+
+enum dcn10_input_csc_select {
+ INPUT_CSC_SELECT_BYPASS = 0,
+ INPUT_CSC_SELECT_ICSC,
+ INPUT_CSC_SELECT_COMA
+};
+
+bool dpp1_dscl_is_lb_conf_valid(
+ int ceil_vratio,
+ int num_partitions,
+ int vtaps);
+
+void dpp1_dscl_calc_lb_num_partitions(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c);
+
+void dpp1_degamma_ram_select(
+ struct dpp *dpp_base,
+ bool use_ram_a);
+
+void dpp1_program_degamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+void dpp1_program_degamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+void dpp1_program_degamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ bool is_ram_a);
+
+void dpp1_power_on_degamma_lut(
+ struct dpp *dpp_base,
+ bool power_on);
+
+void dpp1_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select select);
+
+void dpp1_program_input_lut(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma);
+
+void dpp1_full_bypass(struct dpp *dpp_base);
+
+void dpp1_set_degamma(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode);
+
+void dpp1_set_degamma_pwl(struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+bool dpp_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+void dpp_reset(struct dpp *dpp_base);
+
+void dpp1_cm_program_regamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+
+void dpp1_cm_power_on_regamma_lut(
+ struct dpp *dpp_base,
+ bool power_on);
+
+void dpp1_cm_configure_regamma_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a);
+
+/* program regamma RAM A */
+void dpp1_cm_program_regamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+/* program regamma RAM B */
+void dpp1_cm_program_regamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params);
+void dpp1_cm_set_output_csc_adjustment(
+ struct dpp *dpp_base,
+ const struct out_csc_color_matrix *tbl_entry);
+
+void dpp1_cm_set_output_csc_default(
+ struct dpp *dpp_base,
+ const struct default_adjustment *default_adjust);
+
+void dpp1_cm_set_gamut_remap(
+ struct dpp *dpp,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+void dpp1_dscl_set_scaler_manual_scale(
+ struct dpp *dpp_base,
+ const struct scaler_data *scl_data);
+
+void dpp1_cnv_setup(
+ struct dpp *dpp_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+void dpp1_construct(struct dcn10_dpp *dpp1,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_dpp_registers *tf_regs,
+ const struct dcn_dpp_shift *tf_shift,
+ const struct dcn_dpp_mask *tf_mask);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
new file mode 100644
index 000000000000..40627c244bf5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -0,0 +1,816 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+#include "basics/conversion.h"
+#include "dcn10_cm_common.h"
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+struct dcn10_input_csc_matrix {
+ enum dc_color_space color_space;
+ uint16_t regval[12];
+};
+
+enum dcn10_coef_filter_type_sel {
+ SCL_COEF_LUMA_VERT_FILTER = 0,
+ SCL_COEF_LUMA_HORZ_FILTER = 1,
+ SCL_COEF_CHROMA_VERT_FILTER = 2,
+ SCL_COEF_CHROMA_HORZ_FILTER = 3,
+ SCL_COEF_ALPHA_VERT_FILTER = 4,
+ SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+ AUTOCAL_MODE_OFF = 0,
+
+	/* Autocal calculates the scaling ratio and initial phase;
+	 * DSCL_MODE_SEL must be set to 1.
+	 */
+ AUTOCAL_MODE_AUTOSCALE = 1,
+	/* Autocal performs auto centering without replication;
+	 * DSCL_MODE_SEL must be set to 0.
+	 */
+ AUTOCAL_MODE_AUTOCENTER = 2,
+	/* Autocal performs auto centering and auto replication;
+	 * DSCL_MODE_SEL must be set to 0.
+	 */
+ AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+ DSCL_MODE_SCALING_444_BYPASS = 0,
+ DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DSCL_MODE_DSCL_BYPASS = 6
+};
+
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
+
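+/*
+ * Per-color-space input CSC coefficients, stored as the rows of a 3x4 matrix
+ * in hardware fixed-point format; 0x2000 (1 << 13) represents a coefficient
+ * of 1.0, as seen in the identity matrices used for the RGB color spaces.
+ */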
+static const struct dcn10_input_csc_matrix dcn10_input_csc_matrix[] = {
+ {COLOR_SPACE_SRGB,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_SRGB_LIMITED,
+ {0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
+ {COLOR_SPACE_YCBCR601,
+ {0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
+ 0, 0x2000, 0x38b4, 0xe3a6} },
+ {COLOR_SPACE_YCBCR601_LIMITED,
+ {0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
+ 0, 0x2568, 0x40de, 0xdd3a} },
+ {COLOR_SPACE_YCBCR709,
+ {0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
+ 0x2000, 0x3b61, 0xe24f} },
+
+ {COLOR_SPACE_YCBCR709_LIMITED,
+ {0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
+ 0x2568, 0x43ee, 0xdbb2} }
+};
+
+static void program_gamut_remap(
+ struct dcn10_dpp *dpp,
+ const uint16_t *regval,
+ enum gamut_remap_select select)
+{
+ uint16_t selection = 0;
+ struct color_matrices_reg gam_regs;
+
+ if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
+ REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, 0);
+ return;
+ }
+ switch (select) {
+ case GAMUT_REMAP_COEFF:
+ selection = 1;
+ break;
+ case GAMUT_REMAP_COMA_COEFF:
+ selection = 2;
+ break;
+ case GAMUT_REMAP_COMB_COEFF:
+ selection = 3;
+ break;
+ default:
+ break;
+ }
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;
+
+
+ if (select == GAMUT_REMAP_COEFF) {
+ gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else if (select == GAMUT_REMAP_COMA_COEFF) {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+
+ REG_SET(
+ CM_GAMUT_REMAP_CONTROL, 0,
+ CM_GAMUT_REMAP_MODE, selection);
+
+}
+
+void dpp1_cm_set_gamut_remap(
+ struct dpp *dpp_base,
+ const struct dpp_grph_csc_adjustment *adjust)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
+		/* Bypass if type is bypass or hw */
+		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
+	} else {
+ struct fixed31_32 arr_matrix[12];
+ uint16_t arr_reg_val[12];
+
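+		/*
+		 * Pack the 3x3 temperature matrix into the 3x4 layout used by
+		 * the gamut remap coefficients: each row gets a zero fourth
+		 * (translation) term before fixed-point conversion.
+		 */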
+ arr_matrix[0] = adjust->temperature_matrix[0];
+ arr_matrix[1] = adjust->temperature_matrix[1];
+ arr_matrix[2] = adjust->temperature_matrix[2];
+ arr_matrix[3] = dal_fixed31_32_zero;
+
+ arr_matrix[4] = adjust->temperature_matrix[3];
+ arr_matrix[5] = adjust->temperature_matrix[4];
+ arr_matrix[6] = adjust->temperature_matrix[5];
+ arr_matrix[7] = dal_fixed31_32_zero;
+
+ arr_matrix[8] = adjust->temperature_matrix[6];
+ arr_matrix[9] = adjust->temperature_matrix[7];
+ arr_matrix[10] = adjust->temperature_matrix[8];
+ arr_matrix[11] = dal_fixed31_32_zero;
+
+ convert_float_matrix(
+ arr_reg_val, arr_matrix, 12);
+
+ program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
+ }
+}
+
+void dpp1_cm_set_output_csc_default(
+ struct dpp *dpp_base,
+ const struct default_adjustment *default_adjust)
+{
+
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ uint32_t ocsc_mode = 0;
+
+ if (default_adjust != NULL) {
+ switch (default_adjust->out_color_space) {
+ case COLOR_SPACE_SRGB:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ ocsc_mode = 0;
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ ocsc_mode = 1;
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ ocsc_mode = 2;
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_2020_YCBCR:
+ ocsc_mode = 3;
+ break;
+ case COLOR_SPACE_UNKNOWN:
+ default:
+ break;
+ }
+ }
+
+ REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
+
+}
+
+static void dpp1_cm_get_reg_field(
+ struct dcn10_dpp *dpp,
+ struct xfer_func_reg *reg)
+{
+ reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
+ reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
+ reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
+ reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+ reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
+
+ reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B;
+ reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B;
+ reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
+ reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
+ reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
+ reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
+ reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
+ reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
+ reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B;
+ reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B;
+ reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
+ reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
+}
+
+static void dpp1_cm_program_color_matrix(
+ struct dcn10_dpp *dpp,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ uint32_t mode;
+ struct color_matrices_reg gam_regs;
+
+ REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode);
+
+ if (tbl_entry == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;
+
+ if (mode == 4) {
+
+ gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ tbl_entry->regval,
+ &gam_regs);
+
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ tbl_entry->regval,
+ &gam_regs);
+ }
+}
+
+void dpp1_cm_set_output_csc_adjustment(
+ struct dpp *dpp_base,
+ const struct out_csc_color_matrix *tbl_entry)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ //enum csc_color_mode config = CSC_COLOR_MODE_GRAPHICS_OUTPUT_CSC;
+ uint32_t ocsc_mode = 4;
+
+ /**
+ *if (tbl_entry != NULL) {
+ * switch (tbl_entry->color_space) {
+ * case COLOR_SPACE_SRGB:
+ * case COLOR_SPACE_2020_RGB_FULLRANGE:
+ * ocsc_mode = 0;
+ * break;
+ * case COLOR_SPACE_SRGB_LIMITED:
+ * case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ * ocsc_mode = 1;
+ * break;
+ * case COLOR_SPACE_YCBCR601:
+ * case COLOR_SPACE_YCBCR601_LIMITED:
+ * ocsc_mode = 2;
+ * break;
+ * case COLOR_SPACE_YCBCR709:
+ * case COLOR_SPACE_YCBCR709_LIMITED:
+ * case COLOR_SPACE_2020_YCBCR:
+ * ocsc_mode = 3;
+ * break;
+ * case COLOR_SPACE_UNKNOWN:
+ * default:
+ * break;
+ * }
+ *}
+ */
+
+ REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);
+ dpp1_cm_program_color_matrix(dpp, tbl_entry);
+}
+
+void dpp1_cm_power_on_regamma_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+		RGAM_MEM_PWR_FORCE, power_on ? 0 : 1);
+
+}
+
+void dpp1_cm_program_regamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num)
+{
+ uint32_t i;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
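+
+	/* Each PWL point is written as six consecutive words through the
+	 * CM_RGAM_LUT_DATA port (base R/G/B followed by delta R/G/B); the
+	 * LUT index set up by dpp1_cm_configure_regamma_lut() is assumed to
+	 * auto-increment on every data write.
+	 */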
+	for (i = 0; i < num; i++) {
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_RGAM_LUT_DATA, 0,
+ CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0,
+ CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(CM_RGAM_LUT_DATA, 0,
+ CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);
+
+ }
+
+}
+
+void dpp1_cm_configure_regamma_lut(
+ struct dpp *dpp_base,
+ bool is_ram_a)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
+ CM_RGAM_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
+		CM_RGAM_LUT_WRITE_SEL, is_ram_a ? 0 : 1);
+ REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0);
+}
+
+/* program regamma RAM A */
+void dpp1_cm_program_regamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+
+}
+
+/* program regamma RAM B */
+void dpp1_cm_program_regamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+void dpp1_program_input_csc(
+ struct dpp *dpp_base,
+ enum dc_color_space color_space,
+ enum dcn10_input_csc_select select)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ int i;
+	int arr_size = ARRAY_SIZE(dcn10_input_csc_matrix);
+ const uint16_t *regval = NULL;
+ uint32_t selection = 1;
+ struct color_matrices_reg gam_regs;
+
+ if (select == INPUT_CSC_SELECT_BYPASS) {
+ REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
+ return;
+ }
+
+ for (i = 0; i < arr_size; i++)
+ if (dcn10_input_csc_matrix[i].color_space == color_space) {
+ regval = dcn10_input_csc_matrix[i].regval;
+ break;
+ }
+
+ if (regval == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ if (select == INPUT_CSC_SELECT_COMA)
+ selection = 2;
+ REG_SET(CM_ICSC_CONTROL, 0,
+ CM_ICSC_MODE, selection);
+
+ gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
+ gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
+ gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
+ gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;
+
+
+ if (select == INPUT_CSC_SELECT_ICSC) {
+
+ gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ } else {
+
+ gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
+ gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);
+
+ cm_helper_program_color_matrices(
+ dpp->base.ctx,
+ regval,
+ &gam_regs);
+ }
+}
+
+/* program degamma RAM B */
+void dpp1_program_degamma_lutb_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1);
+ gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15);
+
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+/* program degamma RAM A */
+void dpp1_program_degamma_luta_settings(
+ struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ struct xfer_func_reg gam_regs;
+
+ dpp1_cm_get_reg_field(dpp, &gam_regs);
+
+ gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
+ gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
+ gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R);
+ gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B);
+ gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G);
+ gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R);
+ gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B);
+ gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B);
+ gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G);
+ gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G);
+ gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R);
+ gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R);
+ gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1);
+ gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15);
+
+ cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
+}
+
+void dpp1_power_on_degamma_lut(
+ struct dpp *dpp_base,
+ bool power_on)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_SET(CM_MEM_PWR_CTRL, 0,
+		SHARED_MEM_PWR_DIS, power_on ? 0 : 1);
+
+}
+
+static void dpp1_enable_cm_block(
+ struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8);
+ REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0);
+}
+
+void dpp1_set_degamma(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ dpp1_enable_cm_block(dpp_base);
+
+ switch (mode) {
+ case IPP_DEGAMMA_MODE_BYPASS:
+		/* Setting degamma bypass for now */
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
+ break;
+ case IPP_DEGAMMA_MODE_HW_sRGB:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
+ break;
+ case IPP_DEGAMMA_MODE_HW_xvYCC:
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+void dpp1_degamma_ram_select(
+ struct dpp *dpp_base,
+ bool use_ram_a)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ if (use_ram_a)
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
+ else
+ REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4);
+
+}
+
+static bool dpp1_degamma_ram_inuse(
+ struct dpp *dpp_base,
+ bool *ram_a_inuse)
+{
+ bool ret = false;
+ uint32_t status_reg = 0;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
+ &status_reg);
+
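+	/* Status values 9 and 10 appear to indicate that degamma RAM A or
+	 * RAM B, respectively, is currently in use (compare the IGAM status
+	 * decoding in dpp1_ingamma_ram_inuse() below).
+	 */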
+ if (status_reg == 9) {
+ *ram_a_inuse = true;
+ ret = true;
+ } else if (status_reg == 10) {
+ *ram_a_inuse = false;
+ ret = true;
+ }
+ return ret;
+}
+
+void dpp1_program_degamma_lut(
+ struct dpp *dpp_base,
+ const struct pwl_result_data *rgb,
+ uint32_t num,
+ bool is_ram_a)
+{
+ uint32_t i;
+
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0);
+ REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
+ CM_DGAM_LUT_WRITE_EN_MASK, 7);
+ REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
+		is_ram_a ? 0 : 1);
+
+ REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
+	for (i = 0; i < num; i++) {
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);
+
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
+ REG_SET(CM_DGAM_LUT_DATA, 0,
+ CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
+ }
+}
+
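+/*
+ * Program the degamma PWL.  As implemented below, the LUT is double
+ * buffered: the RAM currently in use is queried, the region settings and
+ * LUT data are written into the other RAM, and only then is that RAM
+ * selected, so the LUT the HW is reading from is never modified in place.
+ */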
+void dpp1_set_degamma_pwl(struct dpp *dpp_base,
+ const struct pwl_params *params)
+{
+ bool is_ram_a = true;
+
+ dpp1_power_on_degamma_lut(dpp_base, true);
+ dpp1_enable_cm_block(dpp_base);
+ dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
+ if (is_ram_a == true)
+ dpp1_program_degamma_lutb_settings(dpp_base, params);
+ else
+ dpp1_program_degamma_luta_settings(dpp_base, params);
+
+ dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
+ params->hw_points_num, !is_ram_a);
+ dpp1_degamma_ram_select(dpp_base, !is_ram_a);
+}
+
+void dpp1_full_bypass(struct dpp *dpp_base)
+{
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ /* Input pixel format: ARGB8888 */
+ REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, 0x8);
+
+ /* Zero expansion */
+ REG_SET_3(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_CONTROL__ALPHA_EN, 0,
+ FORMAT_EXPANSION_MODE, 0);
+
+ /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0; this should be the default */
+ if (dpp->tf_mask->CM_BYPASS_EN)
+ REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
+
+ /* Setting degamma bypass for now */
+ REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
+}
+
+static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
+ bool *ram_a_inuse)
+{
+ bool in_use = false;
+ uint32_t status_reg = 0;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+
+ REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
+ &status_reg);
+
+ // 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB
+ if (status_reg == 1 || status_reg == 3 || status_reg == 4) {
+ *ram_a_inuse = true;
+ in_use = true;
+ // 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB
+ } else if (status_reg == 2 || status_reg == 5 || status_reg == 6) {
+ *ram_a_inuse = false;
+ in_use = true;
+ }
+ return in_use;
+}
+
+/*
+ * Input gamma LUT currently supports 256 values only. This means input color
+ * can have a maximum of 8 bits per channel (= 256 possible values) in order to
+ * have a one-to-one mapping with the LUT. Truncation will occur with color
+ * values greater than 8 bits.
+ *
+ * In the future, this function should support additional input gamma methods,
+ * such as piecewise linear mapping, and input gamma bypass.
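+ *
+ * As implemented below, the LUT is ping-ponged between RAM A and RAM B:
+ * the RAM not currently in use is selected for writes, filled with the
+ * rounded R/G/B entries, and then enabled through CM_IGAM_LUT_MODE
+ * (2 => RAM A, 3 => RAM B), so the LUT being read by HW is not modified
+ * in place.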
+ */
+void dpp1_program_input_lut(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma)
+{
+ int i;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ bool rama_occupied = false;
+ uint32_t ram_num;
+ // Power on LUT memory.
+ REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
+ dpp1_enable_cm_block(dpp_base);
+ // Determine whether to use RAM A or RAM B
+ dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
+ if (!rama_occupied)
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
+ else
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
+ // RW mode is 256-entry LUT
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
+ // IGAM Input format should be 8 bits per channel.
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0);
+ // Do not mask any R,G,B values
+ REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7);
+ // LUT-256, unsigned, integer, new u0.12 format
+ REG_UPDATE_3(
+ CM_IGAM_CONTROL,
+ CM_IGAM_LUT_FORMAT_R, 3,
+ CM_IGAM_LUT_FORMAT_G, 3,
+ CM_IGAM_LUT_FORMAT_B, 3);
+ // Start at index 0 of IGAM LUT
+ REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
+ for (i = 0; i < gamma->num_entries; i++) {
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.red[i]));
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.green[i]));
+ REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
+ dal_fixed31_32_round(
+ gamma->entries.blue[i]));
+ }
+ // Power off LUT memory
+ REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0);
+ // Enable IGAM LUT on the RAM we just wrote to. 2 => RAMA, 3 => RAMB
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
+ REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
new file mode 100644
index 000000000000..cbad36410b32
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn10_dpp.h"
+#include "basics/conversion.h"
+
+
+#define NUM_PHASES 64
+#define HORZ_MAX_TAPS 8
+#define VERT_MAX_TAPS 8
+
+#define BLACK_OFFSET_RGB_Y 0x0
+#define BLACK_OFFSET_CBCR 0x8000
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+enum dcn10_coef_filter_type_sel {
+ SCL_COEF_LUMA_VERT_FILTER = 0,
+ SCL_COEF_LUMA_HORZ_FILTER = 1,
+ SCL_COEF_CHROMA_VERT_FILTER = 2,
+ SCL_COEF_CHROMA_HORZ_FILTER = 3,
+ SCL_COEF_ALPHA_VERT_FILTER = 4,
+ SCL_COEF_ALPHA_HORZ_FILTER = 5
+};
+
+enum dscl_autocal_mode {
+ AUTOCAL_MODE_OFF = 0,
+
+ /* Autocal calculates the scaling ratio and initial phase;
+ * DSCL_MODE_SEL must be set to 1
+ */
+ AUTOCAL_MODE_AUTOSCALE = 1,
+ /* Autocal performs auto centering without replication;
+ * DSCL_MODE_SEL must be set to 0
+ */
+ AUTOCAL_MODE_AUTOCENTER = 2,
+ /* Autocal performs auto centering and auto replication;
+ * DSCL_MODE_SEL must be set to 0
+ */
+ AUTOCAL_MODE_AUTOREPLICATE = 3
+};
+
+enum dscl_mode_sel {
+ DSCL_MODE_SCALING_444_BYPASS = 0,
+ DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DSCL_MODE_DSCL_BYPASS = 6
+};
+
+static void dpp1_dscl_set_overscan(
+ struct dcn10_dpp *dpp,
+ const struct scaler_data *data)
+{
+ uint32_t left = data->recout.x;
+ uint32_t top = data->recout.y;
+
+ int right = data->h_active - data->recout.x - data->recout.width;
+ int bottom = data->v_active - data->recout.y - data->recout.height;
+
+ if (right < 0) {
+ BREAK_TO_DEBUGGER();
+ right = 0;
+ }
+ if (bottom < 0) {
+ BREAK_TO_DEBUGGER();
+ bottom = 0;
+ }
+
+ REG_SET_2(DSCL_EXT_OVERSCAN_LEFT_RIGHT, 0,
+ EXT_OVERSCAN_LEFT, left,
+ EXT_OVERSCAN_RIGHT, right);
+
+ REG_SET_2(DSCL_EXT_OVERSCAN_TOP_BOTTOM, 0,
+ EXT_OVERSCAN_BOTTOM, bottom,
+ EXT_OVERSCAN_TOP, top);
+}
+
+static void dpp1_dscl_set_otg_blank(
+ struct dcn10_dpp *dpp, const struct scaler_data *data)
+{
+ uint32_t h_blank_start = data->h_active;
+ uint32_t h_blank_end = 0;
+ uint32_t v_blank_start = data->v_active;
+ uint32_t v_blank_end = 0;
+
+ REG_SET_2(OTG_H_BLANK, 0,
+ OTG_H_BLANK_START, h_blank_start,
+ OTG_H_BLANK_END, h_blank_end);
+
+ REG_SET_2(OTG_V_BLANK, 0,
+ OTG_V_BLANK_START, v_blank_start,
+ OTG_V_BLANK_END, v_blank_end);
+}
+
+static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
+{
+ if (depth == LB_PIXEL_DEPTH_30BPP)
+ return 0; /* 10 bpc */
+ else if (depth == LB_PIXEL_DEPTH_24BPP)
+ return 1; /* 8 bpc */
+ else if (depth == LB_PIXEL_DEPTH_18BPP)
+ return 2; /* 6 bpc */
+ else if (depth == LB_PIXEL_DEPTH_36BPP)
+ return 3; /* 12 bpc */
+ else {
+ ASSERT(0);
+ return -1; /* Unsupported */
+ }
+}
+
+static bool dpp1_dscl_is_video_format(enum pixel_format format)
+{
+ if (format >= PIXEL_FORMAT_VIDEO_BEGIN
+ && format <= PIXEL_FORMAT_VIDEO_END)
+ return true;
+ else
+ return false;
+}
+
+static bool dpp1_dscl_is_420_format(enum pixel_format format)
+{
+ if (format == PIXEL_FORMAT_420BPP8 ||
+ format == PIXEL_FORMAT_420BPP10)
+ return true;
+ else
+ return false;
+}
+
+static enum dscl_mode_sel dpp1_dscl_get_dscl_mode(
+ struct dpp *dpp_base,
+ const struct scaler_data *data,
+ bool dbg_always_scale)
+{
+ const long long one = dal_fixed31_32_one.value;
+
+ if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
+ /* DSCL is processing data in fixed format */
+ if (data->format == PIXEL_FORMAT_FP16)
+ return DSCL_MODE_DSCL_BYPASS;
+ }
+
+ if (data->ratios.horz.value == one
+ && data->ratios.vert.value == one
+ && data->ratios.horz_c.value == one
+ && data->ratios.vert_c.value == one
+ && !dbg_always_scale)
+ return DSCL_MODE_SCALING_444_BYPASS;
+
+ if (!dpp1_dscl_is_420_format(data->format)) {
+ if (dpp1_dscl_is_video_format(data->format))
+ return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
+ else
+ return DSCL_MODE_SCALING_444_RGB_ENABLE;
+ }
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ return DSCL_MODE_SCALING_420_LUMA_BYPASS;
+ if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
+ return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
+
+ return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
+}
+
+static void dpp1_dscl_set_lb(
+ struct dcn10_dpp *dpp,
+ const struct line_buffer_params *lb_params,
+ enum lb_memory_config mem_size_config)
+{
+ /* LB */
+ if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
+ /* DSCL caps: pixel data processed in fixed format */
+ uint32_t pixel_depth = dpp1_dscl_get_pixel_depth_val(lb_params->depth);
+ uint32_t dyn_pix_depth = lb_params->dynamic_pixel_depth;
+
+ REG_SET_7(LB_DATA_FORMAT, 0,
+ PIXEL_DEPTH, pixel_depth, /* Pixel depth stored in LB */
+ PIXEL_EXPAN_MODE, lb_params->pixel_expan_mode, /* Pixel expansion mode */
+ PIXEL_REDUCE_MODE, 1, /* Pixel reduction mode: Rounding */
+ DYNAMIC_PIXEL_DEPTH, dyn_pix_depth, /* Dynamic expansion pixel depth */
+ DITHER_EN, 0, /* Dithering enable: Disabled */
+ INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */
+ LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */
+ }
+
+ REG_SET_2(LB_MEMORY_CTRL, 0,
+ MEMORY_CONFIG, mem_size_config,
+ LB_MAX_PARTITIONS, 63);
+}
+
+static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
+{
+ if (taps == 8)
+ return get_filter_8tap_64p(ratio);
+ else if (taps == 7)
+ return get_filter_7tap_64p(ratio);
+ else if (taps == 6)
+ return get_filter_6tap_64p(ratio);
+ else if (taps == 5)
+ return get_filter_5tap_64p(ratio);
+ else if (taps == 4)
+ return get_filter_4tap_64p(ratio);
+ else if (taps == 3)
+ return get_filter_3tap_64p(ratio);
+ else if (taps == 2)
+ return get_filter_2tap_64p();
+ else if (taps == 1)
+ return NULL;
+ else {
+ /* should never happen, bug */
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
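+/*
+ * The coefficient tables from dpp1_dscl_get_filter_coeffs_64p() are
+ * 64-phase tables, but only NUM_PHASES / 2 + 1 = 33 phases are written
+ * below, presumably because HW derives the remaining phases by symmetry.
+ * Coefficients are written in even/odd tap pairs; e.g. an 8-tap filter
+ * takes 4 pair writes per phase, 33 * 4 = 132 writes per filter type.
+ */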
+static void dpp1_dscl_set_scaler_filter(
+ struct dcn10_dpp *dpp,
+ uint32_t taps,
+ enum dcn10_coef_filter_type_sel filter_type,
+ const uint16_t *filter)
+{
+ const int tap_pairs = (taps + 1) / 2;
+ int phase;
+ int pair;
+ uint16_t odd_coef, even_coef;
+
+ REG_SET_3(SCL_COEF_RAM_TAP_SELECT, 0,
+ SCL_COEF_RAM_TAP_PAIR_IDX, 0,
+ SCL_COEF_RAM_PHASE, 0,
+ SCL_COEF_RAM_FILTER_TYPE, filter_type);
+
+ for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) {
+ for (pair = 0; pair < tap_pairs; pair++) {
+ even_coef = filter[phase * taps + 2 * pair];
+ if ((pair * 2 + 1) < taps)
+ odd_coef = filter[phase * taps + 2 * pair + 1];
+ else
+ odd_coef = 0;
+
+ REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0,
+ /* Even tap coefficient (bits 1:0 fixed to 0) */
+ SCL_COEF_RAM_EVEN_TAP_COEF, even_coef,
+ /* Write/read control for even coefficient */
+ SCL_COEF_RAM_EVEN_TAP_COEF_EN, 1,
+ /* Odd tap coefficient (bits 1:0 fixed to 0) */
+ SCL_COEF_RAM_ODD_TAP_COEF, odd_coef,
+ /* Write/read control for odd coefficient */
+ SCL_COEF_RAM_ODD_TAP_COEF_EN, 1);
+ }
+ }
+
+}
+
+static void dpp1_dscl_set_scl_filter(
+ struct dcn10_dpp *dpp,
+ const struct scaler_data *scl_data,
+ bool chroma_coef_mode)
+{
+ bool h_2tap_hardcode_coef_en = false;
+ bool v_2tap_hardcode_coef_en = false;
+ bool h_2tap_sharp_en = false;
+ bool v_2tap_sharp_en = false;
+ uint32_t h_2tap_sharp_factor = scl_data->sharpness.horz;
+ uint32_t v_2tap_sharp_factor = scl_data->sharpness.vert;
+ bool coef_ram_current;
+ const uint16_t *filter_h = NULL;
+ const uint16_t *filter_v = NULL;
+ const uint16_t *filter_h_c = NULL;
+ const uint16_t *filter_v_c = NULL;
+
+ h_2tap_hardcode_coef_en = scl_data->taps.h_taps < 3
+ && scl_data->taps.h_taps_c < 3
+ && (scl_data->taps.h_taps > 1 && scl_data->taps.h_taps_c > 1);
+ v_2tap_hardcode_coef_en = scl_data->taps.v_taps < 3
+ && scl_data->taps.v_taps_c < 3
+ && (scl_data->taps.v_taps > 1 && scl_data->taps.v_taps_c > 1);
+
+ h_2tap_sharp_en = h_2tap_hardcode_coef_en && h_2tap_sharp_factor != 0;
+ v_2tap_sharp_en = v_2tap_hardcode_coef_en && v_2tap_sharp_factor != 0;
+
+ REG_UPDATE_6(DSCL_2TAP_CONTROL,
+ SCL_H_2TAP_HARDCODE_COEF_EN, h_2tap_hardcode_coef_en,
+ SCL_H_2TAP_SHARP_EN, h_2tap_sharp_en,
+ SCL_H_2TAP_SHARP_FACTOR, h_2tap_sharp_factor,
+ SCL_V_2TAP_HARDCODE_COEF_EN, v_2tap_hardcode_coef_en,
+ SCL_V_2TAP_SHARP_EN, v_2tap_sharp_en,
+ SCL_V_2TAP_SHARP_FACTOR, v_2tap_sharp_factor);
+
+ if (!v_2tap_hardcode_coef_en || !h_2tap_hardcode_coef_en) {
+ bool filter_updated = false;
+
+ filter_h = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.h_taps, scl_data->ratios.horz);
+ filter_v = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.v_taps, scl_data->ratios.vert);
+
+ filter_updated = (filter_h && (filter_h != dpp->filter_h))
+ || (filter_v && (filter_v != dpp->filter_v));
+
+ if (chroma_coef_mode) {
+ filter_h_c = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.h_taps_c, scl_data->ratios.horz_c);
+ filter_v_c = dpp1_dscl_get_filter_coeffs_64p(
+ scl_data->taps.v_taps_c, scl_data->ratios.vert_c);
+ filter_updated = filter_updated || (filter_h_c && (filter_h_c != dpp->filter_h_c))
+ || (filter_v_c && (filter_v_c != dpp->filter_v_c));
+ }
+
+ if (filter_updated) {
+ uint32_t scl_mode = REG_READ(SCL_MODE);
+
+ if (!h_2tap_hardcode_coef_en && filter_h) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.h_taps,
+ SCL_COEF_LUMA_HORZ_FILTER, filter_h);
+ }
+ dpp->filter_h = filter_h;
+ if (!v_2tap_hardcode_coef_en && filter_v) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.v_taps,
+ SCL_COEF_LUMA_VERT_FILTER, filter_v);
+ }
+ dpp->filter_v = filter_v;
+ if (chroma_coef_mode) {
+ if (!h_2tap_hardcode_coef_en && filter_h_c) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.h_taps_c,
+ SCL_COEF_CHROMA_HORZ_FILTER, filter_h_c);
+ }
+ if (!v_2tap_hardcode_coef_en && filter_v_c) {
+ dpp1_dscl_set_scaler_filter(
+ dpp, scl_data->taps.v_taps_c,
+ SCL_COEF_CHROMA_VERT_FILTER, filter_v_c);
+ }
+ }
+ dpp->filter_h_c = filter_h_c;
+ dpp->filter_v_c = filter_v_c;
+
+ coef_ram_current = get_reg_field_value_ex(
+ scl_mode, dpp->tf_mask->SCL_COEF_RAM_SELECT_CURRENT,
+ dpp->tf_shift->SCL_COEF_RAM_SELECT_CURRENT);
+
+ /* Swap coefficient RAM and set chroma coefficient mode */
+ REG_SET_2(SCL_MODE, scl_mode,
+ SCL_COEF_RAM_SELECT, !coef_ram_current,
+ SCL_CHROMA_COEF_MODE, chroma_coef_mode);
+ }
+ }
+}
+
+static int dpp1_dscl_get_lb_depth_bpc(enum lb_pixel_depth depth)
+{
+ if (depth == LB_PIXEL_DEPTH_30BPP)
+ return 10;
+ else if (depth == LB_PIXEL_DEPTH_24BPP)
+ return 8;
+ else if (depth == LB_PIXEL_DEPTH_18BPP)
+ return 6;
+ else if (depth == LB_PIXEL_DEPTH_36BPP)
+ return 12;
+ else {
+ BREAK_TO_DEBUGGER();
+ return -1; /* Unsupported */
+ }
+}
+
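+/*
+ * Worked example for the math below, with assumed values chosen purely
+ * for illustration (1920-wide viewport and recout, 30bpp LB depth,
+ * alpha disabled):
+ *   memory_line_size_y = (1920 * 10 + 71) / 72 = 267 entries per line
+ *   LB_MEMORY_CONFIG_1:  816 / 267 = 3 partitions
+ *   LB_MEMORY_CONFIG_0: 2752 / 267 = 10 partitions
+ * With alpha enabled, the alpha partitions ((1920 + 5) / 6 = 320 per
+ * line, 2752 / 320 = 8 for config 0) further cap num_part_y.
+ */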
+void dpp1_dscl_calc_lb_num_partitions(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c)
+{
+ int line_size = scl_data->viewport.width < scl_data->recout.width ?
+ scl_data->viewport.width : scl_data->recout.width;
+ int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
+ scl_data->viewport_c.width : scl_data->recout.width;
+ int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
+ int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
+ int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
+ int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
+ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ if (lb_config == LB_MEMORY_CONFIG_1) {
+ lb_memory_size = 816;
+ lb_memory_size_c = 816;
+ lb_memory_size_a = 984;
+ } else if (lb_config == LB_MEMORY_CONFIG_2) {
+ lb_memory_size = 1088;
+ lb_memory_size_c = 1088;
+ lb_memory_size_a = 1312;
+ } else if (lb_config == LB_MEMORY_CONFIG_3) {
+ /* 420 mode: using 3rd mem from Y, Cr and Cb */
+ lb_memory_size = 816 + 1088 + 848 + 848 + 848;
+ lb_memory_size_c = 816 + 1088;
+ lb_memory_size_a = 984 + 1312 + 456;
+ } else {
+ lb_memory_size = 816 + 1088 + 848;
+ lb_memory_size_c = 816 + 1088 + 848;
+ lb_memory_size_a = 984 + 1312 + 456;
+ }
+ *num_part_y = lb_memory_size / memory_line_size_y;
+ *num_part_c = lb_memory_size_c / memory_line_size_c;
+ num_partitions_a = lb_memory_size_a / memory_line_size_a;
+
+ if (scl_data->lb_params.alpha_en
+ && (num_partitions_a < *num_part_y))
+ *num_part_y = num_partitions_a;
+
+ if (*num_part_y > 64)
+ *num_part_y = 64;
+ if (*num_part_c > 64)
+ *num_part_c = 64;
+
+}
+
+bool dpp1_dscl_is_lb_conf_valid(int ceil_vratio, int num_partitions, int vtaps)
+{
+ if (ceil_vratio > 2)
+ return vtaps <= (num_partitions - ceil_vratio + 2);
+ else
+ return vtaps <= num_partitions;
+}
+
+/* Find the first matching configuration that meets the minimum required LB size */
+static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *dpp,
+ const struct scaler_data *scl_data)
+{
+ int num_part_y, num_part_c;
+ int vtaps = scl_data->taps.v_taps;
+ int vtaps_c = scl_data->taps.v_taps_c;
+ int ceil_vratio = dal_fixed31_32_ceil(scl_data->ratios.vert);
+ int ceil_vratio_c = dal_fixed31_32_ceil(scl_data->ratios.vert_c);
+ enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0;
+
+ if (dpp->base.ctx->dc->debug.use_max_lb)
+ return mem_cfg;
+
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c);
+
+ if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
+ return LB_MEMORY_CONFIG_1;
+
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_2, &num_part_y, &num_part_c);
+
+ if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
+ return LB_MEMORY_CONFIG_2;
+
+ if (scl_data->format == PIXEL_FORMAT_420BPP8
+ || scl_data->format == PIXEL_FORMAT_420BPP10) {
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_3, &num_part_y, &num_part_c);
+
+ if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c))
+ return LB_MEMORY_CONFIG_3;
+ }
+
+ dpp->base.caps->dscl_calc_lb_num_partitions(
+ scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c);
+
+ /* Ensure we can support the requested number of vtaps */
+ ASSERT(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps)
+ && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c));
+
+ return LB_MEMORY_CONFIG_0;
+}
+
+void dpp1_dscl_set_scaler_auto_scale(
+ struct dpp *dpp_base,
+ const struct scaler_data *scl_data)
+{
+ enum lb_memory_config lb_config;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode(
+ dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
+ bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
+ && scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+
+ dpp1_dscl_set_overscan(dpp, scl_data);
+
+ dpp1_dscl_set_otg_blank(dpp, scl_data);
+
+ REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
+
+ if (dscl_mode == DSCL_MODE_DSCL_BYPASS)
+ return;
+
+ lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data);
+ dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
+
+ if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
+ return;
+
+ /* TODO: v_min */
+ REG_SET_3(DSCL_AUTOCAL, 0,
+ AUTOCAL_MODE, AUTOCAL_MODE_AUTOSCALE,
+ AUTOCAL_NUM_PIPE, 0,
+ AUTOCAL_PIPE_ID, 0);
+
+ /* Black offsets */
+ if (ycbcr)
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
+ else
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
+
+ REG_SET_4(SCL_TAP_CONTROL, 0,
+ SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1,
+ SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1,
+ SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1,
+ SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
+
+ dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+}
+
+
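+/*
+ * Scale ratios and inits below are programmed as fixed-point values.
+ * Assuming dal_fixed31_32_u2d19()/u0d19() return u2.19/u0.19 values as
+ * their names suggest, the << 5 aligns them into the register fields with
+ * the low five bits zeroed; e.g. a 1:1 ratio (1.0) is 0x80000 in u2.19
+ * and 0x1000000 after the shift.
+ */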
+static void dpp1_dscl_set_manual_ratio_init(
+ struct dcn10_dpp *dpp, const struct scaler_data *data)
+{
+ uint32_t init_frac = 0;
+ uint32_t init_int = 0;
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0,
+ SCL_H_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.horz) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0,
+ SCL_V_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.vert) << 5);
+
+ REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0,
+ SCL_H_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.horz_c) << 5);
+
+ REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0,
+ SCL_V_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.vert_c) << 5);
+
+ /*
+ * Fraction is in 0.24 format: the u0.19 value is shifted left by 5,
+ * so the low five bits are zero
+ */
+ init_frac = dal_fixed31_32_u0d19(data->inits.h) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.h);
+ REG_SET_2(SCL_HORZ_FILTER_INIT, 0,
+ SCL_H_INIT_FRAC, init_frac,
+ SCL_H_INIT_INT, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.h_c) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.h_c);
+ REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0,
+ SCL_H_INIT_FRAC_C, init_frac,
+ SCL_H_INIT_INT_C, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v);
+ REG_SET_2(SCL_VERT_FILTER_INIT, 0,
+ SCL_V_INIT_FRAC, init_frac,
+ SCL_V_INIT_INT, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v_bot) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v_bot);
+ REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
+ SCL_V_INIT_FRAC_BOT, init_frac,
+ SCL_V_INIT_INT_BOT, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v_c) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v_c);
+ REG_SET_2(SCL_VERT_FILTER_INIT_C, 0,
+ SCL_V_INIT_FRAC_C, init_frac,
+ SCL_V_INIT_INT_C, init_int);
+
+ init_frac = dal_fixed31_32_u0d19(data->inits.v_c_bot) << 5;
+ init_int = dal_fixed31_32_floor(data->inits.v_c_bot);
+ REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
+ SCL_V_INIT_FRAC_BOT_C, init_frac,
+ SCL_V_INIT_INT_BOT_C, init_int);
+}
+
+
+
+static void dpp1_dscl_set_recout(
+ struct dcn10_dpp *dpp, const struct rect *recout)
+{
+ REG_SET_2(RECOUT_START, 0,
+ /* First pixel of RECOUT */
+ RECOUT_START_X, recout->x,
+ /* First line of RECOUT */
+ RECOUT_START_Y, recout->y);
+
+ REG_SET_2(RECOUT_SIZE, 0,
+ /* Number of RECOUT horizontal pixels */
+ RECOUT_WIDTH, recout->width,
+ /* Number of RECOUT vertical lines */
+ RECOUT_HEIGHT, recout->height
+ - dpp->base.ctx->dc->debug.surface_visual_confirm * 4 *
+ (dpp->base.inst + 1));
+}
+
+/* Main function to program scaler and line buffer in manual scaling mode */
+void dpp1_dscl_set_scaler_manual_scale(
+ struct dpp *dpp_base,
+ const struct scaler_data *scl_data)
+{
+ enum lb_memory_config lb_config;
+ struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
+ enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode(
+ dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
+ bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
+ && scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+
+ /* Recout */
+ dpp1_dscl_set_recout(dpp, &scl_data->recout);
+
+ /* MPC Size */
+ REG_SET_2(MPC_SIZE, 0,
+ /* Number of horizontal pixels of MPC */
+ MPC_WIDTH, scl_data->h_active,
+ /* Number of vertical lines of MPC */
+ MPC_HEIGHT, scl_data->v_active);
+
+ /* SCL mode */
+ REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
+
+ if (dscl_mode == DSCL_MODE_DSCL_BYPASS)
+ return;
+
+ /* LB */
+ lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data);
+ dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
+
+ if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS)
+ return;
+
+ /* Autocal off */
+ REG_SET_3(DSCL_AUTOCAL, 0,
+ AUTOCAL_MODE, AUTOCAL_MODE_OFF,
+ AUTOCAL_NUM_PIPE, 0,
+ AUTOCAL_PIPE_ID, 0);
+
+ /* Black offsets */
+ if (ycbcr)
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR);
+ else
+ REG_SET_2(SCL_BLACK_OFFSET, 0,
+ SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y,
+ SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y);
+
+ /* Manually calculate scale ratio and init values */
+ dpp1_dscl_set_manual_ratio_init(dpp, scl_data);
+
+ /* HTaps/VTaps */
+ REG_SET_4(SCL_TAP_CONTROL, 0,
+ SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1,
+ SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1,
+ SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1,
+ SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1);
+
+ dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
new file mode 100644
index 000000000000..b13dee64e0ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -0,0 +1,960 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+#include "dcn10_hubp.h"
+
+#define REG(reg)\
+ hubp1->mi_regs->reg
+
+#define CTX \
+ hubp1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp1->mi_shift->field_name, hubp1->mi_mask->field_name
+
+void hubp1_set_blank(struct hubp *hubp, bool blank)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t blank_en = blank ? 1 : 0;
+
+ REG_UPDATE_2(DCHUBP_CNTL,
+ HUBP_BLANK_EN, blank_en,
+ HUBP_TTU_DISABLE, blank_en);
+
+ if (blank) {
+ REG_WAIT(DCHUBP_CNTL,
+ HUBP_NO_OUTSTANDING_REQ, 1,
+ 1, 200);
+ hubp->mpcc_id = 0xf;
+ hubp->opp_id = 0xf;
+ }
+}
+
+static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ uint32_t blank_en = blank ? 1 : 0;
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_BLANK_EN, blank_en);
+}
+
+static void hubp1_vready_workaround(struct hubp *hubp,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ uint32_t value = 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ /* set HUBPREQ_DEBUG_DB[12] = 1 */
+ value = REG_READ(HUBPREQ_DEBUG_DB);
+
+ /* hack mode disable */
+ value |= 0x100;
+ value &= ~0x1000;
+
+ if ((pipe_dest->vstartup_start - 2*(pipe_dest->vready_offset+pipe_dest->vupdate_width
+ + pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
+ /* if (eco_fix_needed(otg_global_sync_timing)),
+ * set HUBPREQ_DEBUG_DB[12] = 1
+ */
+ value |= 0x1000;
+ }
+
+ REG_WRITE(HUBPREQ_DEBUG_DB, value);
+}
+
+void hubp1_program_tiling(
+ struct dcn10_hubp *hubp1,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format)
+{
+ REG_UPDATE_6(DCSURF_ADDR_CONFIG,
+ NUM_PIPES, log_2(info->gfx9.num_pipes),
+ NUM_BANKS, log_2(info->gfx9.num_banks),
+ PIPE_INTERLEAVE, info->gfx9.pipe_interleave,
+ NUM_SE, log_2(info->gfx9.num_shader_engines),
+ NUM_RB_PER_SE, log_2(info->gfx9.num_rb_per_se),
+ MAX_COMPRESSED_FRAGS, log_2(info->gfx9.max_compressed_frags));
+
+ REG_UPDATE_4(DCSURF_TILING_CONFIG,
+ SW_MODE, info->gfx9.swizzle,
+ META_LINEAR, info->gfx9.meta_linear,
+ RB_ALIGNED, info->gfx9.rb_aligned,
+ PIPE_ALIGNED, info->gfx9.pipe_aligned);
+}
+
+void hubp1_program_size_and_rotation(
+ struct dcn10_hubp *hubp1,
+ enum dc_rotation_angle rotation,
+ enum surface_pixel_format format,
+ const union plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ uint32_t pitch, meta_pitch, pitch_c, meta_pitch_c, mirror;
+
+ /* Program data and meta surface pitch (calculation from addrlib)
+ * 444 or 420 luma
+ */
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ pitch = plane_size->video.luma_pitch - 1;
+ meta_pitch = dcc->video.meta_pitch_l - 1;
+ pitch_c = plane_size->video.chroma_pitch - 1;
+ meta_pitch_c = dcc->video.meta_pitch_c - 1;
+ } else {
+ pitch = plane_size->grph.surface_pitch - 1;
+ meta_pitch = dcc->grph.meta_pitch - 1;
+ pitch_c = 0;
+ meta_pitch_c = 0;
+ }
+
+ if (!dcc->enable) {
+ meta_pitch = 0;
+ meta_pitch_c = 0;
+ }
+
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH,
+ PITCH, pitch, META_PITCH, meta_pitch);
+
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ REG_UPDATE_2(DCSURF_SURFACE_PITCH_C,
+ PITCH_C, pitch_c, META_PITCH_C, meta_pitch_c);
+
+ if (horizontal_mirror)
+ mirror = 1;
+ else
+ mirror = 0;
+
+
+ /* Program rotation angle and horizontal mirror */
+ if (rotation == ROTATION_ANGLE_0)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 0,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_90)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 1,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_180)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 2,
+ H_MIRROR_EN, mirror);
+ else if (rotation == ROTATION_ANGLE_270)
+ REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, 3,
+ H_MIRROR_EN, mirror);
+}
+
+void hubp1_program_pixel_format(
+ struct dcn10_hubp *hubp1,
+ enum surface_pixel_format format)
+{
+ uint32_t red_bar = 3;
+ uint32_t blue_bar = 2;
+
+ /* swap for ABGR format */
+ if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+ || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+ red_bar = 2;
+ blue_bar = 3;
+ }
+
+ REG_UPDATE_2(HUBPRET_CONTROL,
+ CROSSBAR_SRC_CB_B, blue_bar,
+ CROSSBAR_SRC_CR_R, red_bar);
+
+ /* Mapping is same as ipp programming (cnvc) */
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 1);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 3);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 8);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 10);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 22);
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 24);
+ break;
+
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 65);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 64);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 67);
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 66);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ /* No need to program the crossbar in DCN 1.0 */
+}
+
+bool hubp1_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ /* program flip type */
+ REG_SET(DCSURF_FLIP_CONTROL, 0,
+ SURFACE_FLIP_TYPE, flip_immediate);
+
+ /* HW automatically latches the rest of the address registers on a write
+ * to DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used.
+ *
+ * Program the high address first and then the low address; order matters!
+ */
+ switch (address->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ /* DCN1.0 does not support const color
+ * TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1
+ * based on address->grph.dcc_const_color
+ * x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma
+ * x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma
+ */
+
+ if (address->grph.addr.quad_part == 0)
+ break;
+
+ REG_UPDATE(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->grph.meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph.meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph.meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph.addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph.addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ if (address->video_progressive.luma_addr.quad_part == 0
+ || address->video_progressive.chroma_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->video_progressive.luma_meta_addr.quad_part != 0) {
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0,
+ PRIMARY_META_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_meta_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->video_progressive.luma_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH_C,
+ address->video_progressive.chroma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0,
+ PRIMARY_SURFACE_ADDRESS_C,
+ address->video_progressive.chroma_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->video_progressive.luma_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->video_progressive.luma_addr.low_part);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ if (address->grph_stereo.left_addr.quad_part == 0)
+ break;
+ if (address->grph_stereo.right_addr.quad_part == 0)
+ break;
+
+ REG_UPDATE(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_TMZ, address->tmz_surface);
+
+ if (address->grph_stereo.right_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_meta_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0,
+ SECONDARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.right_meta_addr.low_part);
+ }
+ if (address->grph_stereo.left_meta_addr.quad_part != 0) {
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_META_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_meta_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0,
+ PRIMARY_META_SURFACE_ADDRESS,
+ address->grph_stereo.left_meta_addr.low_part);
+ }
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0,
+ SECONDARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.right_addr.high_part);
+
+ REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0,
+ SECONDARY_SURFACE_ADDRESS,
+ address->grph_stereo.right_addr.low_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0,
+ PRIMARY_SURFACE_ADDRESS_HIGH,
+ address->grph_stereo.left_addr.high_part);
+
+ REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0,
+ PRIMARY_SURFACE_ADDRESS,
+ address->grph_stereo.left_addr.low_part);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ hubp->request_address = *address;
+
+ if (flip_immediate)
+ hubp->current_address = *address;
+
+ return true;
+}
+
+void hubp1_dcc_control(struct hubp *hubp, bool enable,
+ bool independent_64b_blks)
+{
+ uint32_t dcc_en = enable ? 1 : 0;
+ uint32_t dcc_ind_64b_blk = independent_64b_blks ? 1 : 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE_2(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, dcc_en,
+ PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk);
+}
+
+void hubp1_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ hubp1_dcc_control(hubp, dcc->enable, dcc->grph.independent_64b_blks);
+ hubp1_program_tiling(hubp1, tiling_info, format);
+ hubp1_program_size_and_rotation(
+ hubp1, rotation, format, plane_size, dcc, horizontal_mirror);
+ hubp1_program_pixel_format(hubp1, format);
+}
+
+void hubp1_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ DPTE_GROUP_SIZE, rq_regs->rq_regs_l.dpte_group_size,
+ MPTE_GROUP_SIZE, rq_regs->rq_regs_l.mpte_group_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height,
+ PTE_ROW_HEIGHT_LINEAR, rq_regs->rq_regs_l.pte_row_height_linear);
+ REG_SET_8(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ DPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.dpte_group_size,
+ MPTE_GROUP_SIZE_C, rq_regs->rq_regs_c.mpte_group_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height,
+ PTE_ROW_HEIGHT_LINEAR_C, rq_regs->rq_regs_c.pte_row_height_linear);
+}
+
+
+void hubp1_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ /* DLG - Per hubp */
+ REG_SET_2(BLANK_OFFSET_0, 0,
+ REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
+ DLG_V_BLANK_END, dlg_attr->dlg_vblank_end);
+
+ REG_SET(BLANK_OFFSET_1, 0,
+ MIN_DST_Y_NEXT_START, dlg_attr->min_dst_y_next_start);
+
+ REG_SET(DST_DIMENSIONS, 0,
+ REFCYC_PER_HTOTAL, dlg_attr->refcyc_per_htotal);
+
+ REG_SET_2(DST_AFTER_SCALER, 0,
+ REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
+ DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
+
+ if (REG(PREFETCH_SETTINS))
+ REG_SET_2(PREFETCH_SETTINS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+ else
+ REG_SET_2(PREFETCH_SETTINGS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+
+ REG_SET_2(VBLANK_PARAMETERS_0, 0,
+ DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+
+ REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
+ REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
+
+ /* DLG - Per luma/chroma */
+ REG_SET(VBLANK_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
+
+ REG_SET(VBLANK_PARAMETERS_3, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ REG_SET(NOM_PARAMETERS_0, 0,
+ DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
+
+ REG_SET(NOM_PARAMETERS_1, 0,
+ REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l);
+
+ REG_SET(NOM_PARAMETERS_4, 0,
+ DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l);
+
+ REG_SET(NOM_PARAMETERS_5, 0,
+ REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+ REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_SET_2(PER_LINE_DELIVERY, 0,
+ REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
+ REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
+
+ if (REG(PREFETCH_SETTINS_C))
+ REG_SET(PREFETCH_SETTINS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+ else
+ REG_SET(PREFETCH_SETTINGS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+
+ REG_SET(VBLANK_PARAMETERS_2, 0,
+ REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
+
+ REG_SET(VBLANK_PARAMETERS_4, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ REG_SET(NOM_PARAMETERS_2, 0,
+ DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
+
+ REG_SET(NOM_PARAMETERS_3, 0,
+ REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c);
+
+ REG_SET(NOM_PARAMETERS_6, 0,
+ DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c);
+
+ REG_SET(NOM_PARAMETERS_7, 0,
+ REFCYC_PER_META_CHUNK_NOM_C, dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+ /* TTU - per hubp */
+ REG_SET_2(DCN_TTU_QOS_WM, 0,
+ QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
+
+ REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
+ MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+
+ /* TTU - per luma/chroma */
+ /* Assume surf0 is luma and surf1 is chroma */
+
+ REG_SET_3(DCN_SURF0_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_l,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
+
+ REG_SET(DCN_SURF0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_l);
+
+ REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
+ REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
+ QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
+ QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
+
+ REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_c);
+}
+
+static void hubp1_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ /* OTG is locked when this function is called. Registers are double
+ * buffered, so disabling the requestors is not needed.
+ */
+ hubp1_program_requestor(hubp, rq_regs);
+ hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
+ hubp1_vready_workaround(hubp, pipe_dest);
+}
+
+bool hubp1_is_flip_pending(struct hubp *hubp)
+{
+ uint32_t flip_pending = 0;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct dc_plane_address earliest_inuse_address;
+
+ REG_GET(DCSURF_FLIP_CONTROL,
+ SURFACE_FLIP_PENDING, &flip_pending);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE,
+ SURFACE_EARLIEST_INUSE_ADDRESS, &earliest_inuse_address.grph.addr.low_part);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &earliest_inuse_address.grph.addr.high_part);
+
+ if (flip_pending)
+ return true;
+
+ if (earliest_inuse_address.grph.addr.quad_part != hubp->request_address.grph.addr.quad_part)
+ return true;
+
+ hubp->current_address = hubp->request_address;
+ return false;
+}
+
+uint32_t aperture_default_system = 1;
+uint32_t context0_default_system; /* = 0;*/
+
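+/*
+ * The aperture addresses below are byte addresses; the >> 12 converts
+ * them to 4 KiB page frame numbers before the high/low halves are written
+ * to the MSB/LSB registers.  For example (value chosen purely for
+ * illustration), a system aperture low address of 0x1_0000_0000 is
+ * programmed as page frame 0x100000.
+ */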
+static void hubp1_set_vm_system_aperture_settings(struct hubp *hubp,
+ struct vm_system_aperture_param *apt)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
+ PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
+
+ mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
+ mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 12;
+ mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 12;
+
+ REG_SET_2(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, 0,
+ MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, aperture_default_system, /* 1 = system physical memory */
+ MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mc_vm_apt_default.high_part);
+ REG_SET(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 0,
+ MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mc_vm_apt_default.low_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, mc_vm_apt_low.high_part);
+ REG_SET(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, 0,
+ MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mc_vm_apt_low.low_part);
+
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mc_vm_apt_high.high_part);
+ REG_SET(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, 0,
+ MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mc_vm_apt_high.low_part);
+}
+
+static void hubp1_set_vm_context0_settings(struct hubp *hubp,
+ const struct vm_context0_param *vm0)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ /* pte base */
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, vm0->pte_base.high_part);
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, vm0->pte_base.low_part);
+
+ /* pte start */
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, vm0->pte_start.high_part);
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, vm0->pte_start.low_part);
+
+ /* pte end */
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, vm0->pte_end.high_part);
+ REG_SET(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, 0,
+ VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, vm0->pte_end.low_part);
+
+ /* fault handling */
+ REG_SET_2(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, 0,
+ VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, vm0->fault_default.high_part,
+ VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM, context0_default_system);
+ REG_SET(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, 0,
+ VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, vm0->fault_default.low_part);
+
+ /* control: enable VM PTE */
+ REG_SET_2(DCN_VM_MX_L1_TLB_CNTL, 0,
+ ENABLE_L1_TLB, 1,
+ SYSTEM_ACCESS_MODE, 3);
+}
+
+void min_set_viewport(
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0,
+ PRI_VIEWPORT_WIDTH, viewport->width,
+ PRI_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START, 0,
+ PRI_VIEWPORT_X_START, viewport->x,
+ PRI_VIEWPORT_Y_START, viewport->y);
+
+ /* For stereo */
+ REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION, 0,
+ SEC_VIEWPORT_WIDTH, viewport->width,
+ SEC_VIEWPORT_HEIGHT, viewport->height);
+
+ REG_SET_2(DCSURF_SEC_VIEWPORT_START, 0,
+ SEC_VIEWPORT_X_START, viewport->x,
+ SEC_VIEWPORT_Y_START, viewport->y);
+
+ /* DC supports NV12 only at the moment */
+ REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0,
+ PRI_VIEWPORT_WIDTH_C, viewport_c->width,
+ PRI_VIEWPORT_HEIGHT_C, viewport_c->height);
+
+ REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0,
+ PRI_VIEWPORT_X_START_C, viewport_c->x,
+ PRI_VIEWPORT_Y_START_C, viewport_c->y);
+}
+
+void hubp1_read_state(struct dcn10_hubp *hubp1,
+ struct dcn_hubp_state *s)
+{
+ REG_GET(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, &s->pixel_format);
+
+ REG_GET(DCSURF_SURFACE_EARLIEST_INUSE_HIGH,
+ SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, &s->inuse_addr_hi);
+
+ REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION,
+ PRI_VIEWPORT_WIDTH, &s->viewport_width,
+ PRI_VIEWPORT_HEIGHT, &s->viewport_height);
+
+ REG_GET_2(DCSURF_SURFACE_CONFIG,
+ ROTATION_ANGLE, &s->rotation_angle,
+ H_MIRROR_EN, &s->h_mirror_en);
+
+ REG_GET(DCSURF_TILING_CONFIG,
+ SW_MODE, &s->sw_mode);
+
+ REG_GET(DCSURF_SURFACE_CONTROL,
+ PRIMARY_SURFACE_DCC_EN, &s->dcc_en);
+
+ REG_GET_3(DCHUBP_CNTL,
+ HUBP_BLANK_EN, &s->blank_en,
+ HUBP_TTU_DISABLE, &s->ttu_disable,
+ HUBP_UNDERFLOW_STATUS, &s->underflow_status);
+
+ REG_GET(DCN_GLOBAL_TTU_CNTL,
+ MIN_TTU_VBLANK, &s->min_ttu_vblank);
+
+ REG_GET_2(DCN_TTU_QOS_WM,
+ QoS_LEVEL_LOW_WM, &s->qos_level_low_wm,
+ QoS_LEVEL_HIGH_WM, &s->qos_level_high_wm);
+}
+
+enum cursor_pitch {
+ CURSOR_PITCH_64_PIXELS = 0,
+ CURSOR_PITCH_128_PIXELS,
+ CURSOR_PITCH_256_PIXELS
+};
+
+enum cursor_lines_per_chunk {
+ CURSOR_LINE_PER_CHUNK_2 = 1,
+ CURSOR_LINE_PER_CHUNK_4,
+ CURSOR_LINE_PER_CHUNK_8,
+ CURSOR_LINE_PER_CHUNK_16
+};
+
+static bool ippn10_cursor_program_control(
+ struct dcn10_hubp *hubp1,
+ bool pixel_data_invert,
+ enum dc_cursor_color_format color_format)
+{
+ if (REG(CURSOR_SETTINS))
+ REG_SET_2(CURSOR_SETTINS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+ else
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ return true;
+}
+
+static enum cursor_pitch ippn10_get_cursor_pitch(
+ unsigned int pitch)
+{
+ enum cursor_pitch hw_pitch;
+
+ switch (pitch) {
+ case 64:
+ hw_pitch = CURSOR_PITCH_64_PIXELS;
+ break;
+ case 128:
+ hw_pitch = CURSOR_PITCH_128_PIXELS;
+ break;
+ case 256:
+ hw_pitch = CURSOR_PITCH_256_PIXELS;
+ break;
+ default:
+ DC_ERR("Invalid cursor pitch of %d. "
+ "Only 64/128/256 is supported on DCN.\n", pitch);
+ hw_pitch = CURSOR_PITCH_64_PIXELS;
+ break;
+ }
+ return hw_pitch;
+}
+
+static enum cursor_lines_per_chunk ippn10_get_lines_per_chunk(
+ unsigned int cur_width,
+ enum dc_cursor_color_format format)
+{
+ enum cursor_lines_per_chunk line_per_chunk;
+
+ if (format == CURSOR_MODE_MONO)
+ /* impl B. expansion in CUR Buffer reader */
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+ else if (cur_width <= 32)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_16;
+ else if (cur_width <= 64)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_8;
+ else if (cur_width <= 128)
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_4;
+ else
+ line_per_chunk = CURSOR_LINE_PER_CHUNK_2;
+
+ return line_per_chunk;
+}
+
+void hubp1_cursor_set_attributes(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ enum cursor_pitch hw_pitch = ippn10_get_cursor_pitch(attr->pitch);
+ enum cursor_lines_per_chunk lpc = ippn10_get_lines_per_chunk(
+ attr->width, attr->color_format);
+
+ hubp->curs_attr = *attr;
+
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+ REG_UPDATE_3(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+ ippn10_cursor_program_control(hubp1,
+ attr->attribute_flags.bits.INVERT_PIXEL_DATA,
+ attr->color_format);
+}
+
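+/*
+ * CURSOR_DST_X_OFFSET below is derived from the clamped source offset,
+ * rescaled by ref_clk_khz / pixel_clk_khz and then divided by the
+ * horizontal scale ratio.  Illustrative example with assumed values:
+ * src_x_offset = 196, ref_clk 600000 kHz, pixel_clk 300000 kHz and a
+ * 2.0 horizontal scale ratio give 196 * 600000 / 300000 / 2 = 196.
+ */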
+void hubp1_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ int src_x_offset = pos->x - pos->x_hotspot - param->viewport_x_start;
+ uint32_t cur_en = pos->enable ? 1 : 0;
+ uint32_t dst_x_offset = (src_x_offset >= 0) ? src_x_offset : 0;
+
+ /*
+ * Guard against cursor_set_position() being called with invalid
+ * attributes
+ *
+ * TODO: Look at combining cursor_set_position() and
+ * cursor_set_attributes() into cursor_update()
+ */
+ if (hubp->curs_attr.address.quad_part == 0)
+ return;
+
+ dst_x_offset *= param->ref_clk_khz;
+ dst_x_offset /= param->pixel_clk_khz;
+
+ ASSERT(param->h_scale_ratio.value);
+
+ if (param->h_scale_ratio.value)
+ dst_x_offset = dal_fixed31_32_floor(dal_fixed31_32_div(
+ dal_fixed31_32_from_int(dst_x_offset),
+ param->h_scale_ratio));
+
+ if (src_x_offset >= (int)param->viewport_width)
+ cur_en = 0; /* not visible beyond right edge*/
+
+ if (src_x_offset + (int)hubp->curs_attr.width < 0)
+ cur_en = 0; /* not visible beyond left edge*/
+
+ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
+ hubp1_cursor_set_attributes(hubp, &hubp->curs_attr);
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
+
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
+
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ /* TODO Handle surface pixel formats other than 4:4:4 */
+}
+
+static struct hubp_funcs dcn10_hubp_funcs = {
+ .hubp_program_surface_flip_and_addr =
+ hubp1_program_surface_flip_and_addr,
+ .hubp_program_surface_config =
+ hubp1_program_surface_config,
+ .hubp_is_flip_pending = hubp1_is_flip_pending,
+ .hubp_setup = hubp1_setup,
+ .hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings,
+ .hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
+ .set_blank = hubp1_set_blank,
+ .dcc_control = hubp1_dcc_control,
+ .mem_program_viewport = min_set_viewport,
+ .set_hubp_blank_en = hubp1_set_hubp_blank_en,
+ .set_cursor_attributes = hubp1_cursor_set_attributes,
+ .set_cursor_position = hubp1_cursor_set_position,
+};
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+void dcn10_hubp_construct(
+ struct dcn10_hubp *hubp1,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_mi_registers *mi_regs,
+ const struct dcn_mi_shift *mi_shift,
+ const struct dcn_mi_mask *mi_mask)
+{
+ hubp1->base.funcs = &dcn10_hubp_funcs;
+ hubp1->base.ctx = ctx;
+ hubp1->mi_regs = mi_regs;
+ hubp1->mi_shift = mi_shift;
+ hubp1->mi_mask = mi_mask;
+ hubp1->base.inst = inst;
+ hubp1->base.opp_id = 0xf;
+ hubp1->base.mpcc_id = 0xf;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
new file mode 100644
index 000000000000..66db453c801b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -0,0 +1,683 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MEM_INPUT_DCN10_H__
+#define __DC_MEM_INPUT_DCN10_H__
+
+#include "hubp.h"
+
+#define TO_DCN10_HUBP(hubp)\
+ container_of(hubp, struct dcn10_hubp, base)
+
+#define MI_REG_LIST_DCN(id)\
+ SRI(DCHUBP_CNTL, HUBP, id),\
+ SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
+ SRI(DCSURF_ADDR_CONFIG, HUBP, id),\
+ SRI(DCSURF_TILING_CONFIG, HUBP, id),\
+ SRI(DCSURF_SURFACE_PITCH, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_PITCH_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_CONFIG, HUBP, id),\
+ SRI(DCSURF_FLIP_CONTROL, HUBPREQ, id),\
+ SRI(DCSURF_PRI_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI(DCSURF_PRI_VIEWPORT_START, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_DIMENSION, HUBP, id), \
+ SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \
+ SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \
+ SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_INUSE_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id),\
+ SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\
+ SRI(HUBPRET_CONTROL, HUBPRET, id),\
+ SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\
+ SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\
+ SRI(DCHUBP_REQ_SIZE_CONFIG_C, HUBP, id),\
+ SRI(BLANK_OFFSET_0, HUBPREQ, id),\
+ SRI(BLANK_OFFSET_1, HUBPREQ, id),\
+ SRI(DST_DIMENSIONS, HUBPREQ, id),\
+ SRI(DST_AFTER_SCALER, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_0, HUBPREQ, id),\
+ SRI(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_1, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_3, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_0, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_1, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_4, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_5, HUBPREQ, id),\
+ SRI(PER_LINE_DELIVERY_PRE, HUBPREQ, id),\
+ SRI(PER_LINE_DELIVERY, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_2, HUBPREQ, id),\
+ SRI(VBLANK_PARAMETERS_4, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_2, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_3, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_6, HUBPREQ, id),\
+ SRI(NOM_PARAMETERS_7, HUBPREQ, id),\
+ SRI(DCN_TTU_QOS_WM, HUBPREQ, id),\
+ SRI(DCN_GLOBAL_TTU_CNTL, HUBPREQ, id),\
+ SRI(DCN_SURF0_TTU_CNTL0, HUBPREQ, id),\
+ SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\
+ SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\
+ SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
+ SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id)
+
+#define MI_REG_LIST_DCN10(id)\
+ MI_REG_LIST_DCN(id),\
+ SRI(PREFETCH_SETTINS, HUBPREQ, id),\
+ SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, HUBPREQ, id),\
+ SRI(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, HUBPREQ, id),\
+ SR(DCHUBBUB_SDPIF_FB_BASE),\
+ SR(DCHUBBUB_SDPIF_FB_OFFSET),\
+ SRI(CURSOR_SETTINS, HUBPREQ, id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR, id), \
+ SRI(CURSOR_SIZE, CURSOR, id), \
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CURSOR_POSITION, CURSOR, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR, id)
+
+
+
+struct dcn_mi_registers {
+ uint32_t DCHUBP_CNTL;
+ uint32_t HUBPREQ_DEBUG_DB;
+ uint32_t DCSURF_ADDR_CONFIG;
+ uint32_t DCSURF_TILING_CONFIG;
+ uint32_t DCSURF_SURFACE_PITCH;
+ uint32_t DCSURF_SURFACE_PITCH_C;
+ uint32_t DCSURF_SURFACE_CONFIG;
+ uint32_t DCSURF_FLIP_CONTROL;
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION;
+ uint32_t DCSURF_PRI_VIEWPORT_START;
+ uint32_t DCSURF_SEC_VIEWPORT_DIMENSION;
+ uint32_t DCSURF_SEC_VIEWPORT_START;
+ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C;
+ uint32_t DCSURF_PRI_VIEWPORT_START_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH;
+ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C;
+ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C;
+ uint32_t DCSURF_SURFACE_INUSE;
+ uint32_t DCSURF_SURFACE_INUSE_HIGH;
+ uint32_t DCSURF_SURFACE_INUSE_C;
+ uint32_t DCSURF_SURFACE_INUSE_HIGH_C;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C;
+ uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C;
+ uint32_t DCSURF_SURFACE_CONTROL;
+ uint32_t HUBPRET_CONTROL;
+ uint32_t DCN_EXPANSION_MODE;
+ uint32_t DCHUBP_REQ_SIZE_CONFIG;
+ uint32_t DCHUBP_REQ_SIZE_CONFIG_C;
+ uint32_t BLANK_OFFSET_0;
+ uint32_t BLANK_OFFSET_1;
+ uint32_t DST_DIMENSIONS;
+ uint32_t DST_AFTER_SCALER;
+ uint32_t PREFETCH_SETTINS;
+ uint32_t PREFETCH_SETTINGS;
+ uint32_t VBLANK_PARAMETERS_0;
+ uint32_t REF_FREQ_TO_PIX_FREQ;
+ uint32_t VBLANK_PARAMETERS_1;
+ uint32_t VBLANK_PARAMETERS_3;
+ uint32_t NOM_PARAMETERS_0;
+ uint32_t NOM_PARAMETERS_1;
+ uint32_t NOM_PARAMETERS_4;
+ uint32_t NOM_PARAMETERS_5;
+ uint32_t PER_LINE_DELIVERY_PRE;
+ uint32_t PER_LINE_DELIVERY;
+ uint32_t PREFETCH_SETTINS_C;
+ uint32_t PREFETCH_SETTINGS_C;
+ uint32_t VBLANK_PARAMETERS_2;
+ uint32_t VBLANK_PARAMETERS_4;
+ uint32_t NOM_PARAMETERS_2;
+ uint32_t NOM_PARAMETERS_3;
+ uint32_t NOM_PARAMETERS_6;
+ uint32_t NOM_PARAMETERS_7;
+ uint32_t DCN_TTU_QOS_WM;
+ uint32_t DCN_GLOBAL_TTU_CNTL;
+ uint32_t DCN_SURF0_TTU_CNTL0;
+ uint32_t DCN_SURF0_TTU_CNTL1;
+ uint32_t DCN_SURF1_TTU_CNTL0;
+ uint32_t DCN_SURF1_TTU_CNTL1;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB;
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
+ uint32_t DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
+ uint32_t DCN_VM_MX_L1_TLB_CNTL;
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;
+ uint32_t DCN_VM_SYSTEM_APERTURE_LOW_ADDR;
+ uint32_t DCN_VM_SYSTEM_APERTURE_HIGH_ADDR;
+ uint32_t DCHUBBUB_SDPIF_FB_BASE;
+ uint32_t DCHUBBUB_SDPIF_FB_OFFSET;
+ uint32_t DCN_VM_FB_LOCATION_TOP;
+ uint32_t DCN_VM_FB_LOCATION_BASE;
+ uint32_t DCN_VM_FB_OFFSET;
+ uint32_t DCN_VM_AGP_BASE;
+ uint32_t DCN_VM_AGP_BOT;
+ uint32_t DCN_VM_AGP_TOP;
+ uint32_t CURSOR_SETTINS;
+ uint32_t CURSOR_SETTINGS;
+ uint32_t CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR_SIZE;
+ uint32_t CURSOR_CONTROL;
+ uint32_t CURSOR_POSITION;
+ uint32_t CURSOR_HOT_SPOT;
+ uint32_t CURSOR_DST_OFFSET;
+};
+
+#define MI_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
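+/* MI_SF() pastes the register and field names together so that, when the
+ * lists below are expanded with __SHIFT or _MASK as the mask_sh argument,
+ * they pick up the auto-generated <reg>__<field> shift/mask constants.
+ */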
+#define MI_MASK_SH_LIST_DCN(mask_sh)\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_SE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_RB_PER_SE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_ADDR_CONFIG, MAX_COMPRESSED_FRAGS, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, SW_MODE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, META_LINEAR, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, RB_ALIGNED, mask_sh),\
+ MI_SF(HUBP0_DCSURF_TILING_CONFIG, PIPE_ALIGNED, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH, META_PITCH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, PITCH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_PITCH_C, META_PITCH_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, ROTATION_ANGLE, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, H_MIRROR_EN, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_TYPE, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_FLIP_PENDING, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, SURFACE_UPDATE_LOCK, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_X_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START, PRI_VIEWPORT_Y_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_WIDTH, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION, SEC_VIEWPORT_HEIGHT, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_X_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_SEC_VIEWPORT_START, SEC_VIEWPORT_Y_START, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_WIDTH_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\
+ MI_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS, SECONDARY_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, PRIMARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS, PRIMARY_META_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, SECONDARY_META_SURFACE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C, SURFACE_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE, SURFACE_EARLIEST_INUSE_ADDRESS, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C, SURFACE_EARLIEST_INUSE_ADDRESS_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\
+ MI_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
+ MI_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
+ MI_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
+ MI_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, DRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, PRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, MRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_EXPANSION_MODE, CRQ_EXPANSION_MODE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
+ MI_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
+ MI_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
+ MI_SF(HUBPREQ0_BLANK_OFFSET_0, DLG_V_BLANK_END, mask_sh),\
+ MI_SF(HUBPREQ0_BLANK_OFFSET_1, MIN_DST_Y_NEXT_START, mask_sh),\
+ MI_SF(HUBPREQ0_DST_DIMENSIONS, REFCYC_PER_HTOTAL, mask_sh),\
+ MI_SF(HUBPREQ0_DST_AFTER_SCALER, REFCYC_X_AFTER_SCALER, mask_sh),\
+ MI_SF(HUBPREQ0_DST_AFTER_SCALER, DST_Y_AFTER_SCALER, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_VM_VBLANK, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_0, DST_Y_PER_ROW_VBLANK, mask_sh),\
+ MI_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_C, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_L, mask_sh),\
+ MI_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\
+ MI_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_HIGH_WM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, MIN_TTU_VBLANK, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_GLOBAL_TTU_CNTL, QoS_LEVEL_FLIP, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh)
+
+#define MI_MASK_SH_LIST_DCN10(mask_sh)\
+ MI_MASK_SH_LIST_DCN(mask_sh),\
+ MI_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\
+ MI_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\
+ MI_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB, mask_sh),\
+ MI_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh),\
+ MI_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, mask_sh),\
+ MI_SF(HUBPREQ0_DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, mask_sh),\
+ MI_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ MI_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ MI_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
+
+#define DCN_MI_REG_FIELD_LIST(type) \
+ type HUBP_BLANK_EN;\
+ type HUBP_TTU_DISABLE;\
+ type HUBP_NO_OUTSTANDING_REQ;\
+ type HUBP_UNDERFLOW_STATUS;\
+ type NUM_PIPES;\
+ type NUM_BANKS;\
+ type PIPE_INTERLEAVE;\
+ type NUM_SE;\
+ type NUM_RB_PER_SE;\
+ type MAX_COMPRESSED_FRAGS;\
+ type SW_MODE;\
+ type META_LINEAR;\
+ type RB_ALIGNED;\
+ type PIPE_ALIGNED;\
+ type PITCH;\
+ type META_PITCH;\
+ type PITCH_C;\
+ type META_PITCH_C;\
+ type ROTATION_ANGLE;\
+ type H_MIRROR_EN;\
+ type SURFACE_PIXEL_FORMAT;\
+ type SURFACE_FLIP_TYPE;\
+ type SURFACE_UPDATE_LOCK;\
+ type SURFACE_FLIP_PENDING;\
+ type PRI_VIEWPORT_WIDTH; \
+ type PRI_VIEWPORT_HEIGHT; \
+ type PRI_VIEWPORT_X_START; \
+ type PRI_VIEWPORT_Y_START; \
+ type SEC_VIEWPORT_WIDTH; \
+ type SEC_VIEWPORT_HEIGHT; \
+ type SEC_VIEWPORT_X_START; \
+ type SEC_VIEWPORT_Y_START; \
+ type PRI_VIEWPORT_WIDTH_C; \
+ type PRI_VIEWPORT_HEIGHT_C; \
+ type PRI_VIEWPORT_X_START_C; \
+ type PRI_VIEWPORT_Y_START_C; \
+ type PRIMARY_SURFACE_ADDRESS_HIGH;\
+ type PRIMARY_SURFACE_ADDRESS;\
+ type SECONDARY_SURFACE_ADDRESS_HIGH;\
+ type SECONDARY_SURFACE_ADDRESS;\
+ type PRIMARY_META_SURFACE_ADDRESS_HIGH;\
+ type PRIMARY_META_SURFACE_ADDRESS;\
+ type SECONDARY_META_SURFACE_ADDRESS_HIGH;\
+ type SECONDARY_META_SURFACE_ADDRESS;\
+ type PRIMARY_SURFACE_ADDRESS_HIGH_C;\
+ type PRIMARY_SURFACE_ADDRESS_C;\
+ type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\
+ type PRIMARY_META_SURFACE_ADDRESS_C;\
+ type SURFACE_INUSE_ADDRESS;\
+ type SURFACE_INUSE_ADDRESS_HIGH;\
+ type SURFACE_INUSE_ADDRESS_C;\
+ type SURFACE_INUSE_ADDRESS_HIGH_C;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS_HIGH;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS_C;\
+ type SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C;\
+ type PRIMARY_SURFACE_TMZ;\
+ type PRIMARY_SURFACE_DCC_EN;\
+ type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
+ type DET_BUF_PLANE1_BASE_ADDRESS;\
+ type CROSSBAR_SRC_CB_B;\
+ type CROSSBAR_SRC_CR_R;\
+ type DRQ_EXPANSION_MODE;\
+ type PRQ_EXPANSION_MODE;\
+ type MRQ_EXPANSION_MODE;\
+ type CRQ_EXPANSION_MODE;\
+ type CHUNK_SIZE;\
+ type MIN_CHUNK_SIZE;\
+ type META_CHUNK_SIZE;\
+ type MIN_META_CHUNK_SIZE;\
+ type DPTE_GROUP_SIZE;\
+ type MPTE_GROUP_SIZE;\
+ type SWATH_HEIGHT;\
+ type PTE_ROW_HEIGHT_LINEAR;\
+ type CHUNK_SIZE_C;\
+ type MIN_CHUNK_SIZE_C;\
+ type META_CHUNK_SIZE_C;\
+ type MIN_META_CHUNK_SIZE_C;\
+ type DPTE_GROUP_SIZE_C;\
+ type MPTE_GROUP_SIZE_C;\
+ type SWATH_HEIGHT_C;\
+ type PTE_ROW_HEIGHT_LINEAR_C;\
+ type REFCYC_H_BLANK_END;\
+ type DLG_V_BLANK_END;\
+ type MIN_DST_Y_NEXT_START;\
+ type REFCYC_PER_HTOTAL;\
+ type REFCYC_X_AFTER_SCALER;\
+ type DST_Y_AFTER_SCALER;\
+ type DST_Y_PREFETCH;\
+ type VRATIO_PREFETCH;\
+ type DST_Y_PER_VM_VBLANK;\
+ type DST_Y_PER_ROW_VBLANK;\
+ type REF_FREQ_TO_PIX_FREQ;\
+ type REFCYC_PER_PTE_GROUP_VBLANK_L;\
+ type REFCYC_PER_META_CHUNK_VBLANK_L;\
+ type DST_Y_PER_PTE_ROW_NOM_L;\
+ type REFCYC_PER_PTE_GROUP_NOM_L;\
+ type DST_Y_PER_META_ROW_NOM_L;\
+ type REFCYC_PER_META_CHUNK_NOM_L;\
+ type REFCYC_PER_LINE_DELIVERY_PRE_L;\
+ type REFCYC_PER_LINE_DELIVERY_PRE_C;\
+ type REFCYC_PER_LINE_DELIVERY_L;\
+ type REFCYC_PER_LINE_DELIVERY_C;\
+ type VRATIO_PREFETCH_C;\
+ type REFCYC_PER_PTE_GROUP_VBLANK_C;\
+ type REFCYC_PER_META_CHUNK_VBLANK_C;\
+ type DST_Y_PER_PTE_ROW_NOM_C;\
+ type REFCYC_PER_PTE_GROUP_NOM_C;\
+ type DST_Y_PER_META_ROW_NOM_C;\
+ type REFCYC_PER_META_CHUNK_NOM_C;\
+ type QoS_LEVEL_LOW_WM;\
+ type QoS_LEVEL_HIGH_WM;\
+ type MIN_TTU_VBLANK;\
+ type QoS_LEVEL_FLIP;\
+ type REFCYC_PER_REQ_DELIVERY;\
+ type QoS_LEVEL_FIXED;\
+ type QoS_RAMP_DISABLE;\
+ type REFCYC_PER_REQ_DELIVERY_PRE;\
+ type VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB;\
+ type VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB;\
+ type VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB;\
+ type VM_CONTEXT0_PAGE_TABLE_START_ADDR_LSB;\
+ type VM_CONTEXT0_PAGE_TABLE_END_ADDR_MSB;\
+ type VM_CONTEXT0_PAGE_TABLE_END_ADDR_LSB;\
+ type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_MSB;\
+ type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_SYSTEM;\
+ type VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR_LSB;\
+ type ENABLE_L1_TLB;\
+ type SYSTEM_ACCESS_MODE;\
+ type MC_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
+ type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
+ type MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
+ type MC_VM_SYSTEM_APERTURE_LOW_ADDR_MSB;\
+ type MC_VM_SYSTEM_APERTURE_LOW_ADDR_LSB;\
+ type MC_VM_SYSTEM_APERTURE_HIGH_ADDR_MSB;\
+ type MC_VM_SYSTEM_APERTURE_HIGH_ADDR_LSB;\
+ type MC_VM_SYSTEM_APERTURE_LOW_ADDR;\
+ type MC_VM_SYSTEM_APERTURE_HIGH_ADDR;\
+ type SDPIF_FB_TOP;\
+ type SDPIF_FB_BASE;\
+ type SDPIF_FB_OFFSET;\
+ type SDPIF_AGP_BASE;\
+ type SDPIF_AGP_BOT;\
+ type SDPIF_AGP_TOP;\
+ type FB_TOP;\
+ type FB_BASE;\
+ type FB_OFFSET;\
+ type AGP_BASE;\
+ type AGP_BOT;\
+ type AGP_TOP;\
+ /* todo: get these from GVM instead of reading registers ourselves */\
+ type PAGE_DIRECTORY_ENTRY_HI32;\
+ type PAGE_DIRECTORY_ENTRY_LO32;\
+ type LOGICAL_PAGE_NUMBER_HI4;\
+ type LOGICAL_PAGE_NUMBER_LO32;\
+ type PHYSICAL_PAGE_ADDR_HI4;\
+ type PHYSICAL_PAGE_ADDR_LO32;\
+ type PHYSICAL_PAGE_NUMBER_MSB;\
+ type PHYSICAL_PAGE_NUMBER_LSB;\
+ type LOGICAL_ADDR;\
+ type CURSOR0_DST_Y_OFFSET; \
+ type CURSOR0_CHUNK_HDL_ADJUST; \
+ type CURSOR_SURFACE_ADDRESS_HIGH; \
+ type CURSOR_SURFACE_ADDRESS; \
+ type CURSOR_WIDTH; \
+ type CURSOR_HEIGHT; \
+ type CURSOR_MODE; \
+ type CURSOR_2X_MAGNIFY; \
+ type CURSOR_PITCH; \
+ type CURSOR_LINES_PER_CHUNK; \
+ type CURSOR_ENABLE; \
+ type CURSOR_X_POSITION; \
+ type CURSOR_Y_POSITION; \
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_DST_X_OFFSET; \
+ type OUTPUT_FP
+
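+/* DCN_MI_REG_FIELD_LIST() is an X-macro: it is instantiated below with
+ * uint8_t for the per-field bit shifts and with uint32_t for the per-field
+ * bit masks, so the two structs always stay in sync with the list above.
+ */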
+struct dcn_mi_shift {
+ DCN_MI_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn_mi_mask {
+ DCN_MI_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_hubp {
+ struct hubp base;
+ const struct dcn_mi_registers *mi_regs;
+ const struct dcn_mi_shift *mi_shift;
+ const struct dcn_mi_mask *mi_mask;
+};
+
+void hubp1_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+void hubp1_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr);
+
+void hubp1_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs);
+
+void hubp1_program_pixel_format(
+ struct dcn10_hubp *hubp,
+ enum surface_pixel_format format);
+
+void hubp1_program_size_and_rotation(
+ struct dcn10_hubp *hubp,
+ enum dc_rotation_angle rotation,
+ enum surface_pixel_format format,
+ const union plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+void hubp1_program_tiling(
+ struct dcn10_hubp *hubp,
+ const union dc_tiling_info *info,
+ const enum surface_pixel_format pixel_format);
+
+void hubp1_dcc_control(struct hubp *hubp,
+ bool enable,
+ bool independent_64b_blks);
+
+bool hubp1_program_surface_flip_and_addr(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+bool hubp1_is_flip_pending(struct hubp *hubp);
+
+void hubp1_cursor_set_attributes(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr);
+
+void hubp1_cursor_set_position(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+void hubp1_set_blank(struct hubp *hubp, bool blank);
+
+void min_set_viewport(struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+void dcn10_hubp_construct(
+ struct dcn10_hubp *hubp1,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn_mi_registers *mi_regs,
+ const struct dcn_mi_shift *mi_shift,
+ const struct dcn_mi_mask *mi_mask);
+
+
+struct dcn_hubp_state {
+ uint32_t pixel_format;
+ uint32_t inuse_addr_hi;
+ uint32_t viewport_width;
+ uint32_t viewport_height;
+ uint32_t rotation_angle;
+ uint32_t h_mirror_en;
+ uint32_t sw_mode;
+ uint32_t dcc_en;
+ uint32_t blank_en;
+ uint32_t underflow_status;
+ uint32_t ttu_disable;
+ uint32_t min_ttu_vblank;
+ uint32_t qos_level_low_wm;
+ uint32_t qos_level_high_wm;
+};
+void hubp1_read_state(struct dcn10_hubp *hubp1,
+ struct dcn_hubp_state *s);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
new file mode 100644
index 000000000000..961ad5c3b454
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -0,0 +1,2958 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "resource.h"
+#include "custom_float.h"
+#include "dcn10_hw_sequencer.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_hwseq.h"
+#include "abm.h"
+#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10/dcn10_dpp.h"
+#include "dcn10/dcn10_mpc.h"
+#include "timing_generator.h"
+#include "opp.h"
+#include "ipp.h"
+#include "mpc.h"
+#include "reg_helper.h"
+#include "custom_float.h"
+#include "dcn10_hubp.h"
+
+#define CTX \
+ hws->ctx
+#define REG(reg)\
+ hws->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
+static void log_mpc_crc(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (REG(MPC_CRC_RESULT_GB))
+ DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
+ REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
+ if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
+ DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
+ REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
+}
+
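+/* Print a refclk cycle count as microseconds with one decimal digit,
+ * using the fixed 48 MHz debug reference clock below.
+ */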
+void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle)
+{
+ static const uint32_t ref_clk_mhz = 48;
+ static const unsigned int frac = 10;
+ uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
+
+ DTN_INFO("%d.%d \t ",
+ us_x10 / frac,
+ us_x10 % frac);
+}
+
+#define DTN_INFO_MICRO_SEC(ref_cycle) \
+ print_microsec(dc_ctx, ref_cycle)
+
+struct dcn_hubbub_wm_set {
+ uint32_t wm_set;
+ uint32_t data_urgent;
+ uint32_t pte_meta_urgent;
+ uint32_t sr_enter;
+ uint32_t sr_exit;
+ uint32_t dram_clk_chanage;
+};
+
+struct dcn_hubbub_wm {
+ struct dcn_hubbub_wm_set sets[4];
+};
+
+static void dcn10_hubbub_wm_read_state(struct dce_hwseq *hws,
+ struct dcn_hubbub_wm *wm)
+{
+ struct dcn_hubbub_wm_set *s;
+
+ s = &wm->sets[0];
+ s->wm_set = 0;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A);
+
+ s = &wm->sets[1];
+ s->wm_set = 1;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B);
+
+ s = &wm->sets[2];
+ s->wm_set = 2;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C);
+
+ s = &wm->sets[3];
+ s->wm_set = 3;
+ s->data_urgent = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D);
+ s->pte_meta_urgent = REG_READ(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D);
+ s->sr_enter = REG_READ(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D);
+ s->sr_exit = REG_READ(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D);
+ s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D);
+}
+
+static void dcn10_log_hubbub_state(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct dcn_hubbub_wm wm;
+ int i;
+
+ dcn10_hubbub_wm_read_state(dc->hwseq, &wm);
+
+ DTN_INFO("HUBBUB WM: \t data_urgent \t pte_meta_urgent \t "
+ "sr_enter \t sr_exit \t dram_clk_change \n");
+
+ for (i = 0; i < 4; i++) {
+ struct dcn_hubbub_wm_set *s;
+
+ s = &wm.sets[i];
+ DTN_INFO("WM_Set[%d]:\t ", s->wm_set);
+ DTN_INFO_MICRO_SEC(s->data_urgent);
+ DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
+ DTN_INFO_MICRO_SEC(s->sr_enter);
+ DTN_INFO_MICRO_SEC(s->sr_exit);
+ DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
+ DTN_INFO("\n");
+ }
+
+ DTN_INFO("\n");
+}
+
+static void dcn10_log_hw_state(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ DTN_INFO_BEGIN();
+
+ dcn10_log_hubbub_state(dc);
+
+ DTN_INFO("HUBP:\t format \t addr_hi \t width \t height \t "
+ "rotation \t mirror \t sw_mode \t "
+ "dcc_en \t blank_en \t ttu_dis \t underflow \t "
+ "min_ttu_vblank \t qos_low_wm \t qos_high_wm \n");
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct hubp *hubp = pool->hubps[i];
+ struct dcn_hubp_state s;
+
+ hubp1_read_state(TO_DCN10_HUBP(hubp), &s);
+
+ DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t "
+ "%xh \t %xh \t %xh \t "
+ "%d \t %d \t %d \t %xh \t",
+ i,
+ s.pixel_format,
+ s.inuse_addr_hi,
+ s.viewport_width,
+ s.viewport_height,
+ s.rotation_angle,
+ s.h_mirror_en,
+ s.sw_mode,
+ s.dcc_en,
+ s.blank_en,
+ s.ttu_disable,
+ s.underflow_status);
+ DTN_INFO_MICRO_SEC(s.min_ttu_vblank);
+ DTN_INFO_MICRO_SEC(s.qos_level_low_wm);
+ DTN_INFO_MICRO_SEC(s.qos_level_high_wm);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+
+ DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t "
+ "h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n");
+
+ for (i = 0; i < pool->res_cap->num_timing_generator; i++) {
+ struct timing_generator *tg = pool->timing_generators[i];
+ struct dcn_otg_state s = {0};
+
+ tgn10_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+
+ //only print if OTG master is enabled
+ if ((s.otg_enabled & 1) == 0)
+ continue;
+
+ DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t "
+ "%d \t %d \t %d \t %d \t %d \t %d \t "
+ "%d \t %d \t %d \t %d \t %d \t ",
+ i,
+ s.v_blank_start,
+ s.v_blank_end,
+ s.v_sync_a_start,
+ s.v_sync_a_end,
+ s.v_sync_a_pol,
+ s.v_total_max,
+ s.v_total_min,
+ s.h_blank_start,
+ s.h_blank_end,
+ s.h_sync_a_start,
+ s.h_sync_a_end,
+ s.h_sync_a_pol,
+ s.h_total,
+ s.v_total,
+ s.underflow_occurred_status);
+ DTN_INFO("\n");
+ }
+ DTN_INFO("\n");
+
+ log_mpc_crc(dc);
+
+ DTN_INFO_END();
+}
+
+static void verify_allow_pstate_change_high(
+ struct dce_hwseq *hws)
+{
+ /* pstate latency is ~20us, so if pstate allow is still not asserted
+ * after waiting 40us we are probably stuck and about to hang
+ *
+ * TODO: Figure out why the wait takes ~100us on Linux; the cause is
+ * currently unknown
+ */
+ static unsigned int pstate_wait_timeout_us = 200;
+ static unsigned int pstate_wait_expected_timeout_us = 40;
+ static unsigned int max_sampled_pstate_wait_us; /* data collection */
+ static bool forced_pstate_allow; /* help with revert wa */
+ static bool should_log_hw_state; /* prevent hw state log by default */
+
+ unsigned int debug_index = 0x7;
+ unsigned int debug_data;
+ unsigned int i;
+
+ if (forced_pstate_allow) {
+ /* pstate allow was forced on the previous call to
+ * verify_allow_pstate_change_high() to prevent a hang, so
+ * disable the force here in order to check the real status
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 0,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 0);
+ forced_pstate_allow = false;
+ }
+
+ /* description "3-0: Pipe0 cursor0 QOS
+ * 7-4: Pipe1 cursor0 QOS
+ * 11-8: Pipe2 cursor0 QOS
+ * 15-12: Pipe3 cursor0 QOS
+ * 16: Pipe0 Plane0 Allow Pstate Change
+ * 17: Pipe1 Plane0 Allow Pstate Change
+ * 18: Pipe2 Plane0 Allow Pstate Change
+ * 19: Pipe3 Plane0 Allow Pstate Change
+ * 20: Pipe0 Plane1 Allow Pstate Change
+ * 21: Pipe1 Plane1 Allow Pstate Change
+ * 22: Pipe2 Plane1 Allow Pstate Change
+ * 23: Pipe3 Plane1 Allow Pstate Change
+ * 24: Pipe0 cursor0 Allow Pstate Change
+ * 25: Pipe1 cursor0 Allow Pstate Change
+ * 26: Pipe2 cursor0 Allow Pstate Change
+ * 27: Pipe3 cursor0 Allow Pstate Change
+ * 28: WB0 Allow Pstate Change
+ * 29: WB1 Allow Pstate Change
+ * 30: Arbiter's allow_pstate_change
+ * 31: SOC pstate change request
+ */
+
+ REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, debug_index);
+
+ for (i = 0; i < pstate_wait_timeout_us; i++) {
+ debug_data = REG_READ(DCHUBBUB_TEST_DEBUG_DATA);
+
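+ /* bit 30: arbiter's allow_pstate_change (see the field map above) */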
+ if (debug_data & (1 << 30)) {
+
+ if (i > pstate_wait_expected_timeout_us)
+ dm_logger_write(hws->ctx->logger, LOG_WARNING,
+ "pstate took longer than expected ~%dus\n",
+ i);
+
+ return;
+ }
+ if (max_sampled_pstate_wait_us < i)
+ max_sampled_pstate_wait_us = i;
+
+ udelay(1);
+ }
+
+ /* force pstate allow to prevent system hang
+ * and break to debugger to investigate
+ */
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, 1,
+ DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, 1);
+ forced_pstate_allow = true;
+
+ if (should_log_hw_state) {
+ dcn10_log_hw_state(hws->ctx->dc);
+ }
+
+ dm_logger_write(hws->ctx->logger, LOG_WARNING,
+ "pstate TEST_DEBUG_DATA: 0x%X\n",
+ debug_data);
+ BREAK_TO_DEBUGGER();
+}
+
+static void enable_dppclk(
+ struct dce_hwseq *hws,
+ uint8_t plane_id,
+ uint32_t requested_pix_clk,
+ bool dppclk_div)
+{
+ dm_logger_write(hws->ctx->logger, LOG_SURFACE,
+ "dppclk_rate_control for pipe %d programed to %d\n",
+ plane_id,
+ dppclk_div);
+
+ if (hws->shifts->DPPCLK_RATE_CONTROL)
+ REG_UPDATE_2(DPP_CONTROL[plane_id],
+ DPPCLK_RATE_CONTROL, dppclk_div,
+ DPP_CLOCK_ENABLE, 1);
+ else
+ REG_UPDATE(DPP_CONTROL[plane_id],
+ DPP_CLOCK_ENABLE, 1);
+}
+
+static void enable_power_gating_plane(
+ struct dce_hwseq *hws,
+ bool enable)
+{
+ bool force_on = 1; /* disable power gating */
+
+ if (enable)
+ force_on = 0;
+
+ /* DCHUBP0/1/2/3 */
+ REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
+
+ /* DPP0/1/2/3 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
+ REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
+}
+
+static void disable_vga(
+ struct dce_hwseq *hws)
+{
+ REG_WRITE(D1VGA_CONTROL, 0);
+ REG_WRITE(D2VGA_CONTROL, 0);
+ REG_WRITE(D3VGA_CONTROL, 0);
+ REG_WRITE(D4VGA_CONTROL, 0);
+}
+
+static void dpp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
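+ /* PGFSM power status reads 0 when the domain is up and 2 when it is
+ * power gated, matching the REG_WAIT targets below.
+ */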
+
+ if (hws->ctx->dc->debug.disable_dpp_power_gate)
+ return;
+
+ switch (dpp_inst) {
+ case 0: /* DPP0 */
+ REG_UPDATE(DOMAIN1_PG_CONFIG,
+ DOMAIN1_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN1_PG_STATUS,
+ DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DPP1 */
+ REG_UPDATE(DOMAIN3_PG_CONFIG,
+ DOMAIN3_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN3_PG_STATUS,
+ DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DPP2 */
+ REG_UPDATE(DOMAIN5_PG_CONFIG,
+ DOMAIN5_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN5_PG_STATUS,
+ DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DPP3 */
+ REG_UPDATE(DOMAIN7_PG_CONFIG,
+ DOMAIN7_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN7_PG_STATUS,
+ DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
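+/* Convert a watermark from nanoseconds to refclk cycles and clamp it to the
+ * register's maximum value (21 bits wide on DCN1).
+ */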
+static uint32_t convert_and_clamp(
+ uint32_t wm_ns,
+ uint32_t refclk_mhz,
+ uint32_t clamp_value)
+{
+ uint32_t ret_val = 0;
+ ret_val = wm_ns * refclk_mhz;
+ ret_val /= 1000;
+
+ if (ret_val > clamp_value)
+ ret_val = clamp_value;
+
+ return ret_val;
+}
+
+static void program_watermarks(
+ struct dce_hwseq *hws,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz)
+{
+ uint32_t force_en = hws->ctx->dc->debug.disable_stutter ? 1 : 0;
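+ /* When stutter is disabled for debug, force self refresh to stay
+ * disallowed via the FORCE_VALUE/FORCE_ENABLE update at the end.
+ */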
+ /*
+ * Need to clamp to the max of the register values (i.e. no wrap);
+ * for dcn1, all watermark registers are 21 bits wide
+ */
+ uint32_t prog_wm_value;
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0);
+
+ /* Repeat for watermark sets A, B, C and D. */
+ /* clock state A */
+ prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.urgent_ns, prog_wm_value);
+
+ prog_wm_value = convert_and_clamp(watermarks->a.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.pte_meta_urgent_ns, prog_wm_value);
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->a.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+
+ /* clock state B */
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.urgent_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_B calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->b.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n\n"
+ "HW register value = 0x%x\n",
+ watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ /* clock state C */
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.urgent_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_C calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->c.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n\n"
+ "HW register value = 0x%x\n",
+ watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ /* clock state D */
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.urgent_ns, refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.urgent_ns, prog_wm_value);
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.pte_meta_urgent_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.pte_meta_urgent_ns, prog_wm_value);
+
+
+ if (REG(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D)) {
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_ENTER_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.cstate_exit_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "SR_EXIT_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n",
+ watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
+ }
+
+
+ prog_wm_value = convert_and_clamp(
+ watermarks->d.cstate_pstate.pstate_change_ns,
+ refclk_mhz, 0x1fffff);
+ REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ dm_logger_write(hws->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
+ "HW register value = 0x%x\n\n",
+ watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+
+ REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
+
+ REG_UPDATE_2(DCHUBBUB_ARB_DRAM_STATE_CNTL,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, 0,
+ DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, force_en);
+
+#if 0
+ REG_UPDATE_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
+#endif
+}
+
+
+static void dcn10_update_dchub(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data)
+{
+ /* TODO: port code from dal2 */
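+ /* The SDPIF FB/AGP range registers below take addresses in 4 MB
+ * units, hence the >> 22 shifts.
+ */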
+ switch (dh_data->fb_mode) {
+ case FRAME_BUFFER_MODE_ZFB_ONLY:
+ /* For the ZFB case, DCHUB FB BASE and TOP need to be put upside down (base above top) to indicate ZFB mode */
+ REG_UPDATE(DCHUBBUB_SDPIF_FB_TOP,
+ SDPIF_FB_TOP, 0);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_FB_BASE,
+ SDPIF_FB_BASE, 0x0FFFF);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_MIXED_ZFB_AND_LOCAL:
+ /* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, dh_data->zfb_phys_addr_base >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, dh_data->zfb_mc_base_addr >> 22);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, (dh_data->zfb_mc_base_addr +
+ dh_data->zfb_size_in_byte - 1) >> 22);
+ break;
+ case FRAME_BUFFER_MODE_LOCAL_ONLY:
+ /* Should not touch FB LOCATION (done by VBIOS on AsicInit table) */
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BASE,
+ SDPIF_AGP_BASE, 0);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_BOT,
+ SDPIF_AGP_BOT, 0X03FFFF);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_AGP_TOP,
+ SDPIF_AGP_TOP, 0);
+ break;
+ default:
+ break;
+ }
+
+ dh_data->dchub_initialzied = true;
+ dh_data->dchub_info_valid = false;
+}
+
+static void hubp_pg_control(
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on)
+{
+ uint32_t power_gate = power_on ? 0 : 1;
+ uint32_t pwr_status = power_on ? 0 : 2;
+
+ if (hws->ctx->dc->debug.disable_hubp_power_gate)
+ return;
+
+ switch (hubp_inst) {
+ case 0: /* DCHUBP0 */
+ REG_UPDATE(DOMAIN0_PG_CONFIG,
+ DOMAIN0_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN0_PG_STATUS,
+ DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 1: /* DCHUBP1 */
+ REG_UPDATE(DOMAIN2_PG_CONFIG,
+ DOMAIN2_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN2_PG_STATUS,
+ DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 2: /* DCHUBP2 */
+ REG_UPDATE(DOMAIN4_PG_CONFIG,
+ DOMAIN4_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN4_PG_STATUS,
+ DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ case 3: /* DCHUBP3 */
+ REG_UPDATE(DOMAIN6_PG_CONFIG,
+ DOMAIN6_POWER_GATE, power_gate);
+
+ REG_WAIT(DOMAIN6_PG_STATUS,
+ DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
+ 1, 1000);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
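+/* Un-gate the DPP and HUBP power domains for one front-end pipe;
+ * IP_REQUEST_EN is asserted around the power-gate programming and
+ * released once both domains have been enabled.
+ */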
+static void power_on_plane(
+ struct dce_hwseq *hws,
+ int plane_id)
+{
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ dpp_pg_control(hws, plane_id, true);
+ hubp_pg_control(hws, plane_id, true);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ dm_logger_write(hws->ctx->logger, LOG_DEBUG,
+ "Un-gated front end for pipe %d\n", plane_id);
+ }
+}
+
+static void undo_DEGVIDCN10_253_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = dc->res_pool->hubps[0];
+ int pwr_status = 0;
+
+ REG_GET(DOMAIN0_PG_STATUS, DOMAIN0_PGFSM_PWR_STATUS, &pwr_status);
+ /* Don't need to blank if hubp is power gated */
+ if (pwr_status == 2)
+ return;
+
+ hubp->funcs->set_blank(hubp, true);
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+
+ hubp_pg_control(hws, 0, false);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+}
+
+static void apply_DEGVIDCN10_253_wa(struct dc *dc)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = dc->res_pool->hubps[0];
+
+ if (dc->debug.disable_stutter)
+ return;
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+
+ hubp_pg_control(hws, 0, true);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+
+ hubp->funcs->set_hubp_blank_en(hubp, false);
+}
+
+static void bios_golden_init(struct dc *dc)
+{
+ struct dc_bios *bp = dc->ctx->dc_bios;
+ int i;
+
+ /* initialize dcn global */
+ bp->funcs->enable_disp_power_gating(bp,
+ CONTROLLER_ID_D0, ASIC_PIPE_INIT);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ /* initialize dcn per pipe */
+ bp->funcs->enable_disp_power_gating(bp,
+ CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
+ }
+}
+
+static void dcn10_init_hw(struct dc *dc)
+{
+ int i;
+ struct abm *abm = dc->res_pool->abm;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ REG_WRITE(REFCLK_CNTL, 0);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ enable_power_gating_plane(dc->hwseq, true);
+ return;
+ }
+ /* end of FPGA init; everything below runs only on a real ASIC */
+
+ bios_golden_init(dc);
+
+ disable_vga(dc->hwseq);
+
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ link->link_enc->funcs->hw_init(link->link_enc);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dpp *dpp = dc->res_pool->dpps[i];
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ dpp->funcs->dpp_reset(dpp);
+ dc->res_pool->mpc->funcs->remove(
+ dc->res_pool->mpc, &(dc->res_pool->opps[i]->mpc_tree),
+ dc->res_pool->opps[i]->inst, i);
+
+ /* Blank controller using driver code instead of
+ * command table.
+ */
+ tg->funcs->set_blank(tg, true);
+ hwss_wait_for_blank_complete(tg);
+ }
+
+ for (i = 0; i < dc->res_pool->audio_count; i++) {
+ struct audio *audio = dc->res_pool->audios[i];
+
+ audio->funcs->hw_init(audio);
+ }
+
+ if (abm != NULL) {
+ abm->funcs->init_backlight(abm);
+ abm->funcs->abm_init(abm);
+ }
+
+ /* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power */
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+
+ enable_power_gating_plane(dc->hwseq, true);
+}
+
+static enum dc_status dcn10_prog_pixclk_crtc_otg(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ bool enableStereo = stream->timing.timing_3d_format == TIMING_3D_FORMAT_NONE ?
+ false:true;
+ bool rightEyePolarity = stream->timing.flags.RIGHT_EYE_3D_POLARITY;
+
+
+ /* The caller iterates pipes in order, so pipe0 (the parent pipe) is
+ * called first and the back end is set up for it. Child pipes share
+ * the back end with pipe0, so no programming is needed for them.
+ */
+ if (pipe_ctx->top_pipe != NULL)
+ return DC_OK;
+
+ /* TODO check if timing_changed, disable stream if timing changed */
+
+ /* The HW programming guide assumes the display has already been
+ * disabled by the unplug sequence and that the OTG is stopped.
+ */
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
+
+ if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+ pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_offset = pipe_ctx->pipe_dlg_param.vupdate_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_width = pipe_ctx->pipe_dlg_param.vupdate_width;
+
+ pipe_ctx->stream_res.tg->dlg_otg_param.signal = pipe_ctx->stream->signal;
+
+ pipe_ctx->stream_res.tg->funcs->program_timing(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ true);
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+ pipe_ctx->stream_res.opp,
+ enableStereo,
+ rightEyePolarity);
+
+#if 0 /* move to after enable_crtc */
+ /* TODO: OPP FMT, ABM, etc. should be done here. */
+ /* For FPGA for now, instance 0 only. TODO: move to opp.c */
+
+ inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
+
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
+ pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+#endif
+ /* program otg blank color */
+ color_space = stream->output_color_space;
+ color_space_to_black_color(dc, color_space, &black_color);
+ pipe_ctx->stream_res.tg->funcs->set_blank_color(
+ pipe_ctx->stream_res.tg,
+ &black_color);
+
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
+ hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
+
+ /* VTG is within DCHUB command block. DCFCLK is always on */
+ if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
+ BREAK_TO_DEBUGGER();
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ /* TODO program crtc source select for non-virtual signal*/
+ /* TODO program FMT */
+ /* TODO setup link_enc */
+ /* TODO set stream attributes */
+ /* TODO program audio */
+ /* TODO enable stream if timing changed */
+ /* TODO unblank stream if DP */
+
+ return DC_OK;
+}
+
+static void reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ int i;
+
+ if (pipe_ctx->stream_res.stream_enc == NULL) {
+ pipe_ctx->stream = NULL;
+ return;
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ /* DPMS may have already disabled the stream */
+ if (!pipe_ctx->stream->dpms_off)
+ core_link_disable_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
+ }
+
+ /* The caller iterates pipes in reverse order, so the parent pipe
+ * (pipe0) is reset last. The back end is shared by all pipes and is
+ * only disabled when the parent pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe == NULL) {
+ pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
+ break;
+
+ if (i == dc->res_pool->pipe_count)
+ return;
+
+ pipe_ctx->stream = NULL;
+ dm_logger_write(dc->ctx->logger, LOG_DEBUG,
+ "Reset back end for pipe %d, tg:%d\n",
+ pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
+}
+
+/* Trigger HW to start disconnecting the plane from the stream on the next vsync */
+static void plane_atomic_disconnect(struct dc *dc,
+ int fe_idx)
+{
+ struct hubp *hubp = dc->res_pool->hubps[fe_idx];
+ struct mpc *mpc = dc->res_pool->mpc;
+ int opp_id, z_idx;
+ int mpcc_id = -1;
+
+ /* Look at the MPC tree rather than the mem input (hubp) to know if we already reset */
+ for (opp_id = 0; opp_id < dc->res_pool->pipe_count; opp_id++) {
+ struct output_pixel_processor *opp = dc->res_pool->opps[opp_id];
+
+ for (z_idx = 0; z_idx < opp->mpc_tree.num_pipes; z_idx++) {
+ if (opp->mpc_tree.dpp[z_idx] == fe_idx) {
+ mpcc_id = opp->mpc_tree.mpcc[z_idx];
+ break;
+ }
+ }
+ if (mpcc_id != -1)
+ break;
+ }
+ /* Already reset */
+ if (opp_id == dc->res_pool->pipe_count)
+ return;
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+ hubp->funcs->dcc_control(hubp, false, false);
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ mpc->funcs->remove(mpc, &(dc->res_pool->opps[opp_id]->mpc_tree),
+ dc->res_pool->opps[opp_id]->inst, fe_idx);
+}
+
+/* Disable the HW used by the plane.
+ * Note: cannot disable until the disconnect is complete.
+ */
+static void plane_atomic_disable(struct dc *dc,
+ int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = dc->res_pool->hubps[fe_idx];
+ struct mpc *mpc = dc->res_pool->mpc;
+ int opp_id = hubp->opp_id;
+
+ if (opp_id == 0xf)
+ return;
+
+ mpc->funcs->wait_for_idle(mpc, hubp->mpcc_id);
+ dc->res_pool->opps[hubp->opp_id]->mpcc_disconnect_pending[hubp->mpcc_id] = false;
+ /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
+ "[debug_mpo: atomic disable finished on mpcc %d]\n",
+ fe_idx);*/
+
+ hubp->funcs->set_blank(hubp, true);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ REG_UPDATE(HUBP_CLK_CNTL[fe_idx],
+ HUBP_CLOCK_ENABLE, 0);
+ REG_UPDATE(DPP_CONTROL[fe_idx],
+ DPP_CLOCK_ENABLE, 0);
+
+ if (dc->res_pool->opps[opp_id]->mpc_tree.num_pipes == 0)
+ REG_UPDATE(OPP_PIPE_CONTROL[opp_id],
+ OPP_PIPE_CLOCK_EN, 0);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
+/*
+ * Kill power to the plane HW.
+ * Note: cannot power down until the plane is disabled.
+ */
+static void plane_atomic_power_down(struct dc *dc, int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dpp *dpp = dc->res_pool->dpps[fe_idx];
+
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ dpp_pg_control(hws, fe_idx, false);
+ hubp_pg_control(hws, fe_idx, false);
+ dpp->funcs->dpp_reset(dpp);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ dm_logger_write(dc->ctx->logger, LOG_DEBUG,
+ "Power gated front end %d\n", fe_idx);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+}
+
+
+static void reset_front_end(
+ struct dc *dc,
+ int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct timing_generator *tg;
+ int opp_id = dc->res_pool->hubps[fe_idx]->opp_id;
+
+ /* Already reset */
+ if (opp_id == 0xf)
+ return;
+
+ tg = dc->res_pool->timing_generators[opp_id];
+ tg->funcs->lock(tg);
+
+ plane_atomic_disconnect(dc, fe_idx);
+
+ REG_UPDATE(OTG_GLOBAL_SYNC_STATUS[tg->inst], VUPDATE_NO_LOCK_EVENT_CLEAR, 1);
+ tg->funcs->unlock(tg);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(hws);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_GLOBAL_SYNC_STATUS[tg->inst],
+ VUPDATE_NO_LOCK_EVENT_OCCURRED, 1,
+ 1, 100000);
+
+ plane_atomic_disable(dc, fe_idx);
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Reset front end %d\n",
+ fe_idx);
+}
+
+static void dcn10_power_down_fe(struct dc *dc, int fe_idx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dpp *dpp = dc->res_pool->dpps[fe_idx];
+
+ reset_front_end(dc, fe_idx);
+
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 1);
+ dpp_pg_control(hws, fe_idx, false);
+ hubp_pg_control(hws, fe_idx, false);
+ dpp->funcs->dpp_reset(dpp);
+ REG_SET(DC_IP_REQUEST_CNTL, 0,
+ IP_REQUEST_EN, 0);
+ dm_logger_write(dc->ctx->logger, LOG_DEBUG,
+ "Power gated front end %d\n", fe_idx);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
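+/* Tear down pipes that do not carry over into the new context: lock all
+ * active TGs, disconnect the planes, unlock, disable and power down the
+ * unused front ends, then reset the unused back ends.
+ */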
+static void reset_hw_ctx_wrap(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+
+ /* Reset Front End*/
+ /* Lock*/
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
+
+ if (cur_pipe_ctx->stream)
+ tg->funcs->lock(tg);
+ }
+ /* Disconnect*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->stream ||
+ !pipe_ctx->plane_state ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+
+ plane_atomic_disconnect(dc, i);
+ }
+ }
+ /* Unlock*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+ struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
+
+ if (cur_pipe_ctx->stream)
+ tg->funcs->unlock(tg);
+ }
+
+ /* Disable and Powerdown*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /*if (!pipe_ctx_old->stream)
+ continue;*/
+
+ if (pipe_ctx->stream && pipe_ctx->plane_state
+ && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ plane_atomic_disable(dc, i);
+
+ if (!pipe_ctx->stream || !pipe_ctx->plane_state)
+ plane_atomic_power_down(dc, i);
+ }
+
+ /* Reset Back End*/
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+
+}
+
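+/* For side-by-side / top-and-bottom stereo the secondary split pipe scans
+ * out of the right-eye address, so swap it into left_addr for programming
+ * and return the original left address so the caller can restore it after
+ * the flip has been submitted. If the stream is 3D but the address is not
+ * yet a stereo address, promote it to one with right_addr == left_addr.
+ */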
+static bool patch_address_for_sbs_tb_stereo(
+ struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ (pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
+ *addr = plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.left_addr =
+ plane_state->address.grph_stereo.right_addr;
+ return true;
+ } else {
+ if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
+ plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
+ plane_state->address.grph_stereo.right_addr =
+ plane_state->address.grph_stereo.left_addr;
+ }
+ }
+ return false;
+}
+
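+/* Flip DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST so the arbiter latches the
+ * newly programmed watermark set (see the DCN1 watermark workaround note
+ * in program_all_pipe_in_tree).
+ */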
+static void toggle_watermark_change_req(struct dce_hwseq *hws)
+{
+ uint32_t watermark_change_req;
+
+ REG_GET(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, &watermark_change_req);
+
+ if (watermark_change_req)
+ watermark_change_req = 0;
+ else
+ watermark_change_req = 1;
+
+ REG_UPDATE(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req);
+}
+
+static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ bool addr_patched = false;
+ PHYSICAL_ADDRESS_LOC addr;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ if (plane_state == NULL)
+ return;
+ addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+ pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.hubp,
+ &plane_state->address,
+ plane_state->flip_immediate);
+ plane_state->status.requested_address = plane_state->address;
+ if (addr_patched)
+ pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
+}
+
+static bool dcn10_set_input_transfer_func(
+ struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state)
+{
+ struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ const struct dc_transfer_func *tf = NULL;
+ bool result = true;
+
+ if (dpp_base == NULL)
+ return false;
+
+ if (plane_state->in_transfer_func)
+ tf = plane_state->in_transfer_func;
+
+ if (plane_state->gamma_correction && dce_use_lut(plane_state))
+ dpp_base->funcs->ipp_program_input_lut(dpp_base,
+ plane_state->gamma_correction);
+
+ if (tf == NULL)
+ dpp_base->funcs->ipp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ else if (tf->type == TF_TYPE_PREDEFINED) {
+ switch (tf->tf) {
+ case TRANSFER_FUNCTION_SRGB:
+ dpp_base->funcs->ipp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_HW_sRGB);
+ break;
+ case TRANSFER_FUNCTION_BT709:
+ dpp_base->funcs->ipp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_HW_xvYCC);
+ break;
+ case TRANSFER_FUNCTION_LINEAR:
+ dpp_base->funcs->ipp_set_degamma(dpp_base,
+ IPP_DEGAMMA_MODE_BYPASS);
+ break;
+ case TRANSFER_FUNCTION_PQ:
+ result = false;
+ break;
+ default:
+ result = false;
+ break;
+ }
+ } else if (tf->type == TF_TYPE_BYPASS) {
+ dpp_base->funcs->ipp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
+ } else {
+ /*TF_TYPE_DISTRIBUTED_POINTS*/
+ result = false;
+ }
+
+ return result;
+}
+/* TODO: modify the method to handle RGB for arr_points */
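+/* Pack the regamma end points and curve values into the custom float
+ * encodings programmed into the DPP: the region end points use unsigned
+ * custom floats (6 exponent bits with 12 and 10 mantissa bits
+ * respectively), while the per-point values and deltas use a signed
+ * 6-exponent/12-mantissa format.
+ */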
+static bool convert_to_custom_float(
+ struct pwl_result_data *rgb_resulted,
+ struct curve_points *arr_points,
+ uint32_t hw_points_num)
+{
+ struct custom_float_format fmt;
+
+ struct pwl_result_data *rgb = rgb_resulted;
+
+ uint32_t i = 0;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].x,
+ &fmt,
+ &arr_points[0].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].offset,
+ &fmt,
+ &arr_points[0].custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[0].slope,
+ &fmt,
+ &arr_points[0].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 10;
+ fmt.sign = false;
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].x,
+ &fmt,
+ &arr_points[1].custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].y,
+ &fmt,
+ &arr_points[1].custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ arr_points[1].slope,
+ &fmt,
+ &arr_points[1].custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ while (i != hw_points_num) {
+ if (!convert_to_custom_float_format(
+ rgb->red,
+ &fmt,
+ &rgb->red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->green,
+ &fmt,
+ &rgb->green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->blue,
+ &fmt,
+ &rgb->blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_red,
+ &fmt,
+ &rgb->delta_red_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_green,
+ &fmt,
+ &rgb->delta_green_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!convert_to_custom_float_format(
+ rgb->delta_blue,
+ &fmt,
+ &rgb->delta_blue_reg)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ++rgb;
+ ++i;
+ }
+
+ return true;
+}
+#define MAX_REGIONS_NUMBER 34
+#define MAX_LOW_POINT 25
+#define NUMBER_SEGMENTS 32
+
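+/* Build the piecewise-linear regamma curve the DPP expects. Each region k
+ * contributes 2^seg_distr[k] HW points: the PQ layout uses 32 regions of
+ * 2^3 = 8 points (256 in total), the default layout
+ * 2^3 + 7 * 2^4 + 2 * 2^5 = 184 points. The indexing below implies that
+ * output_tf->tf_pts holds NUMBER_SEGMENTS (32) samples per power-of-two
+ * region, starting MAX_LOW_POINT (25) regions below 2^0.
+ */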
+static bool dcn10_translate_regamma_to_hw_format(const struct dc_transfer_func
+ *output_tf, struct pwl_params *regamma_params)
+{
+ struct curve_points *arr_points;
+ struct pwl_result_data *rgb_resulted;
+ struct pwl_result_data *rgb;
+ struct pwl_result_data *rgb_plus_1;
+ struct fixed31_32 y_r;
+ struct fixed31_32 y_g;
+ struct fixed31_32 y_b;
+ struct fixed31_32 y1_min;
+ struct fixed31_32 y3_max;
+
+ int32_t segment_start, segment_end;
+ int32_t i;
+ uint32_t j, k, seg_distr[MAX_REGIONS_NUMBER], increment, start_index, hw_points;
+
+ if (output_tf == NULL || regamma_params == NULL ||
+ output_tf->type == TF_TYPE_BYPASS)
+ return false;
+
+ arr_points = regamma_params->arr_points;
+ rgb_resulted = regamma_params->rgb_resulted;
+ hw_points = 0;
+
+ memset(regamma_params, 0, sizeof(struct pwl_params));
+ memset(seg_distr, 0, sizeof(seg_distr));
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* 32 segments
+ * segments are from 2^-25 to 2^7
+ */
+ for (i = 0; i < 32 ; i++)
+ seg_distr[i] = 3;
+
+ segment_start = -25;
+ segment_end = 7;
+ } else {
+ /* 10 segments
+ * segments span 2^-10 to 2^0
+ * Fewer than 256 points are used, for optimization.
+ */
+ seg_distr[0] = 3;
+ seg_distr[1] = 4;
+ seg_distr[2] = 4;
+ seg_distr[3] = 4;
+ seg_distr[4] = 4;
+ seg_distr[5] = 4;
+ seg_distr[6] = 4;
+ seg_distr[7] = 4;
+ seg_distr[8] = 5;
+ seg_distr[9] = 5;
+
+ segment_start = -10;
+ segment_end = 0;
+ }
+
+ for (i = segment_end - segment_start; i < MAX_REGIONS_NUMBER ; i++)
+ seg_distr[i] = -1;
+
+ for (k = 0; k < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1)
+ hw_points += (1 << seg_distr[k]);
+ }
+
+ j = 0;
+ for (k = 0; k < (segment_end - segment_start); k++) {
+ increment = NUMBER_SEGMENTS / (1 << seg_distr[k]);
+ start_index = (segment_start + k + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ for (i = start_index; i < start_index + NUMBER_SEGMENTS; i += increment) {
+ if (j == hw_points - 1)
+ break;
+ rgb_resulted[j].red = output_tf->tf_pts.red[i];
+ rgb_resulted[j].green = output_tf->tf_pts.green[i];
+ rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
+ j++;
+ }
+ }
+
+ /* last point */
+ start_index = (segment_end + MAX_LOW_POINT) * NUMBER_SEGMENTS;
+ rgb_resulted[hw_points - 1].red =
+ output_tf->tf_pts.red[start_index];
+ rgb_resulted[hw_points - 1].green =
+ output_tf->tf_pts.green[start_index];
+ rgb_resulted[hw_points - 1].blue =
+ output_tf->tf_pts.blue[start_index];
+
+ arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_start));
+ arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+ arr_points[2].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2),
+ dal_fixed31_32_from_int(segment_end));
+
+ y_r = rgb_resulted[0].red;
+ y_g = rgb_resulted[0].green;
+ y_b = rgb_resulted[0].blue;
+
+ y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b));
+
+ arr_points[0].y = y1_min;
+ arr_points[0].slope = dal_fixed31_32_div(
+ arr_points[0].y,
+ arr_points[0].x);
+ y_r = rgb_resulted[hw_points - 1].red;
+ y_g = rgb_resulted[hw_points - 1].green;
+ y_b = rgb_resulted[hw_points - 1].blue;
+
+ /* arr_points[1].y should be the Y value for the region end
+ * (hw_points), not the last HW point (hw_points - 1).
+ */
+ y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b));
+
+ arr_points[1].y = y3_max;
+ arr_points[2].y = y3_max;
+
+ arr_points[1].slope = dal_fixed31_32_zero;
+ arr_points[2].slope = dal_fixed31_32_zero;
+
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ /* for PQ, we want to have a straight line from last HW X point,
+ * and the slope to be such that we hit 1.0 at 10000 nits.
+ */
+ const struct fixed31_32 end_value =
+ dal_fixed31_32_from_int(125);
+
+ arr_points[1].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ arr_points[2].slope = dal_fixed31_32_div(
+ dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y),
+ dal_fixed31_32_sub(end_value, arr_points[1].x));
+ }
+
+ regamma_params->hw_points_num = hw_points;
+
+ i = 1;
+ for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) {
+ if (seg_distr[k] != -1) {
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+ regamma_params->arr_curve_points[i].offset =
+ regamma_params->arr_curve_points[k].
+ offset + (1 << seg_distr[k]);
+ }
+ i++;
+ }
+
+ if (seg_distr[k] != -1)
+ regamma_params->arr_curve_points[k].segments_num =
+ seg_distr[k];
+
+ rgb = rgb_resulted;
+ rgb_plus_1 = rgb_resulted + 1;
+
+ i = 1;
+
+ while (i != hw_points + 1) {
+ if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = rgb->red;
+ if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = rgb->green;
+ if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = rgb->blue;
+
+ rgb->delta_red = dal_fixed31_32_sub(
+ rgb_plus_1->red,
+ rgb->red);
+ rgb->delta_green = dal_fixed31_32_sub(
+ rgb_plus_1->green,
+ rgb->green);
+ rgb->delta_blue = dal_fixed31_32_sub(
+ rgb_plus_1->blue,
+ rgb->blue);
+
+ ++rgb_plus_1;
+ ++rgb;
+ ++i;
+ }
+
+ convert_to_custom_float(rgb_resulted, arr_points, hw_points);
+
+ return true;
+}
+
+static bool dcn10_set_output_transfer_func(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+{
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+
+ if (dpp == NULL)
+ return false;
+
+ dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
+
+ if (stream->out_transfer_func &&
+ stream->out_transfer_func->type ==
+ TF_TYPE_PREDEFINED &&
+ stream->out_transfer_func->tf ==
+ TRANSFER_FUNCTION_SRGB) {
+ dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_SRGB);
+ } else if (dcn10_translate_regamma_to_hw_format(
+ stream->out_transfer_func, &dpp->regamma_params)) {
+ dpp->funcs->opp_program_regamma_pwl(dpp, &dpp->regamma_params);
+ dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_USER);
+ } else {
+ dpp->funcs->opp_set_regamma_mode(dpp, OPP_REGAMMA_BYPASS);
+ }
+
+ return true;
+}
+
+static void dcn10_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ struct hubp *hubp = NULL;
+ hubp = dc->res_pool->hubps[pipe->pipe_idx];
+ /* Use the TG master update lock to lock everything on the TG;
+ * therefore only the top pipe needs to lock.
+ */
+ if (pipe->top_pipe)
+ return;
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ if (lock)
+ pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
+static bool wait_for_reset_trigger_to_occur(
+ struct dc_context *dc_ctx,
+ struct timing_generator *tg)
+{
+ bool rc = false;
+
+ /* To avoid an endless loop, wait at most
+ * frames_to_wait_on_triggered_reset frames for the reset to occur. */
+ const uint32_t frames_to_wait_on_triggered_reset = 10;
+ int i;
+
+ for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
+
+ if (!tg->funcs->is_counter_moving(tg)) {
+ DC_ERROR("TG counter is not moving!\n");
+ break;
+ }
+
+ if (tg->funcs->did_triggered_reset_occur(tg)) {
+ rc = true;
+ /* usually occurs at i=1 */
+ DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
+ i);
+ break;
+ }
+
+ /* Wait for one frame. */
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
+ tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
+ }
+
+ if (false == rc)
+ DC_ERROR("GSL: Timeout on reset trigger!\n");
+
+ return rc;
+}
+
+static void dcn10_enable_timing_synchronization(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[])
+{
+ struct dc_context *dc_ctx = dc->ctx;
+ int i;
+
+ DC_SYNC_INFO("Setting up OTG reset trigger\n");
+
+ for (i = 1; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg, grouped_pipes[0]->stream_res.tg->inst);
+
+
+ DC_SYNC_INFO("Waiting for trigger\n");
+
+ /* Only need to check one pipe for the reset since all the others
+ * are synchronized. Look at the last pipe programmed to reset.
+ */
+ wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
+ for (i = 1; i < group_size; i++)
+ grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
+ grouped_pipes[i]->stream_res.tg);
+
+ DC_SYNC_INFO("Sync complete\n");
+}
+
+static void print_rq_dlg_ttu(
+ struct dc *core_dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== DML TTU Output parameters [%d] ==============\n"
+ "qos_level_low_wm: %d, \n"
+ "qos_level_high_wm: %d, \n"
+ "min_ttu_vblank: %d, \n"
+ "qos_level_flip: %d, \n"
+ "refcyc_per_req_delivery_l: %d, \n"
+ "qos_level_fixed_l: %d, \n"
+ "qos_ramp_disable_l: %d, \n"
+ "refcyc_per_req_delivery_pre_l: %d, \n"
+ "refcyc_per_req_delivery_c: %d, \n"
+ "qos_level_fixed_c: %d, \n"
+ "qos_ramp_disable_c: %d, \n"
+ "refcyc_per_req_delivery_pre_c: %d\n"
+ "=============================================================\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->ttu_regs.qos_level_low_wm,
+ pipe_ctx->ttu_regs.qos_level_high_wm,
+ pipe_ctx->ttu_regs.min_ttu_vblank,
+ pipe_ctx->ttu_regs.qos_level_flip,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
+ pipe_ctx->ttu_regs.qos_level_fixed_l,
+ pipe_ctx->ttu_regs.qos_ramp_disable_l,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
+ pipe_ctx->ttu_regs.qos_level_fixed_c,
+ pipe_ctx->ttu_regs.qos_ramp_disable_c,
+ pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
+ );
+
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== DML DLG Output parameters [%d] ==============\n"
+ "refcyc_h_blank_end: %d, \n"
+ "dlg_vblank_end: %d, \n"
+ "min_dst_y_next_start: %d, \n"
+ "refcyc_per_htotal: %d, \n"
+ "refcyc_x_after_scaler: %d, \n"
+ "dst_y_after_scaler: %d, \n"
+ "dst_y_prefetch: %d, \n"
+ "dst_y_per_vm_vblank: %d, \n"
+ "dst_y_per_row_vblank: %d, \n"
+ "ref_freq_to_pix_freq: %d, \n"
+ "vratio_prefetch: %d, \n"
+ "refcyc_per_pte_group_vblank_l: %d, \n"
+ "refcyc_per_meta_chunk_vblank_l: %d, \n"
+ "dst_y_per_pte_row_nom_l: %d, \n"
+ "refcyc_per_pte_group_nom_l: %d, \n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->dlg_regs.refcyc_h_blank_end,
+ pipe_ctx->dlg_regs.dlg_vblank_end,
+ pipe_ctx->dlg_regs.min_dst_y_next_start,
+ pipe_ctx->dlg_regs.refcyc_per_htotal,
+ pipe_ctx->dlg_regs.refcyc_x_after_scaler,
+ pipe_ctx->dlg_regs.dst_y_after_scaler,
+ pipe_ctx->dlg_regs.dst_y_prefetch,
+ pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
+ pipe_ctx->dlg_regs.dst_y_per_row_vblank,
+ pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
+ pipe_ctx->dlg_regs.vratio_prefetch,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
+ pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
+ );
+
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\ndst_y_per_meta_row_nom_l: %d, \n"
+ "refcyc_per_meta_chunk_nom_l: %d, \n"
+ "refcyc_per_line_delivery_pre_l: %d, \n"
+ "refcyc_per_line_delivery_l: %d, \n"
+ "vratio_prefetch_c: %d, \n"
+ "refcyc_per_pte_group_vblank_c: %d, \n"
+ "refcyc_per_meta_chunk_vblank_c: %d, \n"
+ "dst_y_per_pte_row_nom_c: %d, \n"
+ "refcyc_per_pte_group_nom_c: %d, \n"
+ "dst_y_per_meta_row_nom_c: %d, \n"
+ "refcyc_per_meta_chunk_nom_c: %d, \n"
+ "refcyc_per_line_delivery_pre_c: %d, \n"
+ "refcyc_per_line_delivery_c: %d \n"
+ "========================================================\n",
+ pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
+ pipe_ctx->dlg_regs.vratio_prefetch_c,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
+ pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
+ pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
+ pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
+ pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
+ pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
+ );
+
+ dm_logger_write(core_dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== DML RQ Output parameters [%d] ==============\n"
+ "chunk_size: %d \n"
+ "min_chunk_size: %d \n"
+ "meta_chunk_size: %d \n"
+ "min_meta_chunk_size: %d \n"
+ "dpte_group_size: %d \n"
+ "mpte_group_size: %d \n"
+ "swath_height: %d \n"
+ "pte_row_height_linear: %d \n"
+ "========================================================\n",
+ pipe_ctx->pipe_idx,
+ pipe_ctx->rq_regs.rq_regs_l.chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
+ pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
+ pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
+ pipe_ctx->rq_regs.rq_regs_l.swath_height,
+ pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
+ );
+}
+
+static void dcn10_power_on_fe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ power_on_plane(dc->hwseq,
+ pipe_ctx->pipe_idx);
+
+ /* enable DCFCLK for the current DCHUB pipe */
+ REG_UPDATE(HUBP_CLK_CNTL[pipe_ctx->pipe_idx],
+ HUBP_CLOCK_ENABLE, 1);
+
+ /* make sure OPP_PIPE_CLOCK_EN = 1 */
+ REG_UPDATE(OPP_PIPE_CONTROL[pipe_ctx->stream_res.tg->inst],
+ OPP_PIPE_CLOCK_EN, 1);
+ /*TODO: REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, 0x1f);*/
+
+ if (plane_state) {
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Pipe:%d 0x%x: addr hi:0x%x, "
+ "addr low:0x%x, "
+ "src: %d, %d, %d,"
+ " %d; dst: %d, %d, %d, %d;\n",
+ pipe_ctx->pipe_idx,
+ plane_state,
+ plane_state->address.grph.addr.high_part,
+ plane_state->address.grph.addr.low_part,
+ plane_state->src_rect.x,
+ plane_state->src_rect.y,
+ plane_state->src_rect.width,
+ plane_state->src_rect.height,
+ plane_state->dst_rect.x,
+ plane_state->dst_rect.y,
+ plane_state->dst_rect.width,
+ plane_state->dst_rect.height);
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Pipe %d: width, height, x, y format:%d\n"
+ "viewport:%d, %d, %d, %d\n"
+ "recout: %d, %d, %d, %d\n",
+ pipe_ctx->pipe_idx,
+ plane_state->format,
+ pipe_ctx->plane_res.scl_data.viewport.width,
+ pipe_ctx->plane_res.scl_data.viewport.height,
+ pipe_ctx->plane_res.scl_data.viewport.x,
+ pipe_ctx->plane_res.scl_data.viewport.y,
+ pipe_ctx->plane_res.scl_data.recout.width,
+ pipe_ctx->plane_res.scl_data.recout.height,
+ pipe_ctx->plane_res.scl_data.recout.x,
+ pipe_ctx->plane_res.scl_data.recout.y);
+ print_rq_dlg_ttu(dc, pipe_ctx);
+ }
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+}
+
+static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
+{
+ struct dpp_grph_csc_adjustment adjust;
+ memset(&adjust, 0, sizeof(adjust));
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
+
+
+ if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ adjust.temperature_matrix[0] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[0];
+ adjust.temperature_matrix[1] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[1];
+ adjust.temperature_matrix[2] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[2];
+ adjust.temperature_matrix[3] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[4];
+ adjust.temperature_matrix[4] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[5];
+ adjust.temperature_matrix[5] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[6];
+ adjust.temperature_matrix[6] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[8];
+ adjust.temperature_matrix[7] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[9];
+ adjust.temperature_matrix[8] =
+ pipe_ctx->stream->
+ gamut_remap_matrix.matrix[10];
+ }
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
+}
+
+
+static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix)
+{
+ int i;
+ struct out_csc_color_matrix tbl_entry;
+
+ if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
+ == true) {
+ enum dc_color_space color_space =
+ pipe_ctx->stream->output_color_space;
+
+ //uint16_t matrix[12];
+ for (i = 0; i < 12; i++)
+ tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
+
+ tbl_entry.color_space = color_space;
+ //tbl_entry.regval = matrix;
+ pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry);
+ }
+}
+static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ return false;
+}
+
+static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+{
+ if (pipe_ctx->plane_state->visible)
+ return true;
+ if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
+ return true;
+ if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
+ return true;
+ return false;
+}
+
+static bool is_rgb_cspace(enum dc_color_space output_color_space)
+{
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ case COLOR_SPACE_SRGB_LIMITED:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_ADOBERGB:
+ return true;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ case COLOR_SPACE_2020_YCBCR:
+ return false;
+ default:
+ /* Unknown color space; add a case to the switch */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+static void dcn10_get_surface_visual_confirm_color(
+ const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
+
+ switch (pipe_ctx->plane_res.scl_data.format) {
+ case PIXEL_FORMAT_ARGB8888:
+ /* set border color to red */
+ color->color_r_cr = color_value;
+ break;
+
+ case PIXEL_FORMAT_ARGB2101010:
+ /* set border color to blue */
+ color->color_b_cb = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP8:
+ /* set border color to green */
+ color->color_g_y = color_value;
+ break;
+ case PIXEL_FORMAT_420BPP10:
+ /* set border color to yellow */
+ color->color_g_y = color_value;
+ color->color_r_cr = color_value;
+ break;
+ case PIXEL_FORMAT_FP16:
+ /* set border color to white */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
+ color->color_g_y = color_value;
+ break;
+ default:
+ break;
+ }
+}
+
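+/* Mirror the MC system aperture registers (programmed by the GPU VM code)
+ * into the layout DCHUBP expects. The physical page number is in 4 KB
+ * units, hence the shift by 12; the shift by 18 for the logical aperture
+ * bounds suggests those registers hold 256 KB granularity values.
+ */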
+static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
+ struct vm_system_aperture_param *apt,
+ struct dce_hwseq *hws)
+{
+ PHYSICAL_ADDRESS_LOC physical_page_number;
+ uint32_t logical_addr_low;
+ uint32_t logical_addr_high;
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
+ REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ LOGICAL_ADDR, &logical_addr_low);
+
+ REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ LOGICAL_ADDR, &logical_addr_high);
+
+ apt->sys_default.quad_part = physical_page_number.quad_part << 12;
+ apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
+ apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
+}
+
+/* Temporary: read the settings from registers; in the future the values will come from the KMD directly */
+static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
+ struct vm_context0_param *vm0,
+ struct dce_hwseq *hws)
+{
+ PHYSICAL_ADDRESS_LOC fb_base;
+ PHYSICAL_ADDRESS_LOC fb_offset;
+ uint32_t fb_base_value;
+ uint32_t fb_offset_value;
+
+ REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
+ REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
+
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
+ REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
+
+ REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
+ REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
+
+ /*
+ * The value in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
+ * Therefore we need to do
+ * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
+ * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
+ */
+ fb_base.quad_part = (uint64_t)fb_base_value << 24;
+ fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
+ vm0->pte_base.quad_part += fb_base.quad_part;
+ vm0->pte_base.quad_part -= fb_offset.quad_part;
+}
+
+static void dcn10_program_pte_vm(struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation,
+ struct dce_hwseq *hws)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+ struct vm_system_aperture_param apt = { {{ 0 } } };
+ struct vm_context0_param vm0 = { { { 0 } } };
+
+
+ mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
+ mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
+
+ hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
+ hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
+}
+
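+/* Per-plane front-end programming: DPP clock, HUBP DLG/TTU/RQ registers,
+ * optional PTE/VM setup, input pixel processing, MPC tree insertion,
+ * scaler and viewport, gamut remap/CSC, surface config and finally the
+ * flip address.
+ */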
+static void update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ union plane_size size = plane_state->plane_size;
+ struct mpcc_cfg mpcc_cfg = {0};
+ struct pipe_ctx *top_pipe;
+ bool per_pixel_alpha = plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+
+ /* TODO: proper fix once FPGA works */
+ /* The DPP clock value depends on the DML calculation and may change dynamically */
+ enable_dppclk(
+ dc->hwseq,
+ pipe_ctx->pipe_idx,
+ pipe_ctx->stream_res.pix_clk_params.requested_pix_clk,
+ context->bw.dcn.calc_clk.dppclk_div);
+ dc->current_state->bw.dcn.cur_clk.dppclk_div =
+ context->bw.dcn.calc_clk.dppclk_div;
+ context->bw.dcn.cur_clk.dppclk_div = context->bw.dcn.calc_clk.dppclk_div;
+
+ /* TODO: need an input parameter to tell which OTG the current DCHUB
+ * pipe is tied to. VTG is within DCHUBBUB, a common block shared by
+ * each pipe HUBP. VTG maps 1:1 with OTG; each pipe HUBP selects which
+ * VTG to use.
+ */
+ REG_UPDATE(DCHUBP_CNTL[pipe_ctx->pipe_idx], HUBP_VTG_SEL, pipe_ctx->stream_res.tg->inst);
+
+ hubp->funcs->hubp_setup(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs,
+ &pipe_ctx->rq_regs,
+ &pipe_ctx->pipe_dlg_param);
+
+ size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+
+ if (dc->config.gpu_vm_support)
+ dcn10_program_pte_vm(
+ pipe_ctx->plane_res.hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ plane_state->rotation,
+ hws
+ );
+
+ dpp->funcs->ipp_setup(dpp,
+ plane_state->format,
+ EXPANSION_MODE_ZERO);
+
+ mpcc_cfg.dpp_id = hubp->inst;
+ mpcc_cfg.opp_id = pipe_ctx->stream_res.opp->inst;
+ mpcc_cfg.tree_cfg = &(pipe_ctx->stream_res.opp->mpc_tree);
+ for (top_pipe = pipe_ctx->top_pipe; top_pipe; top_pipe = top_pipe->top_pipe)
+ mpcc_cfg.z_index++;
+ if (dc->debug.surface_visual_confirm)
+ dcn10_get_surface_visual_confirm_color(
+ pipe_ctx, &mpcc_cfg.black_color);
+ else
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space,
+ &mpcc_cfg.black_color);
+ mpcc_cfg.per_pixel_alpha = per_pixel_alpha;
+ /* DCN1.0 has output CM before MPC which seems to screw with
+ * pre-multiplied alpha.
+ */
+ mpcc_cfg.pre_multiplied_alpha = is_rgb_cspace(
+ pipe_ctx->stream->output_color_space)
+ && per_pixel_alpha;
+ hubp->mpcc_id = dc->res_pool->mpc->funcs->add(dc->res_pool->mpc, &mpcc_cfg);
+ hubp->opp_id = mpcc_cfg.opp_id;
+
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
+ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+ /* scaler configuration */
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
+ pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+
+ hubp->funcs->mem_program_viewport(hubp,
+ &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
+
+ /*gamut remap*/
+ program_gamut_remap(pipe_ctx);
+
+ program_csc_matrix(pipe_ctx,
+ pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix);
+
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ plane_state->format,
+ &plane_state->tiling_info,
+ &size,
+ plane_state->rotation,
+ &plane_state->dcc,
+ plane_state->horizontal_mirror);
+
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+
+ if (is_pipe_tree_visible(pipe_ctx))
+ hubp->funcs->set_blank(hubp, false);
+}
+
+
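+/* Program a pipe and, recursively, all of its bottom (split/MPO) pipes.
+ * The top pipe also carries the per-OTG work: watermarks, the TG lock,
+ * global sync programming and OTG blanking.
+ */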
+static void program_all_pipe_in_tree(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context)
+{
+ unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000;
+
+ if (pipe_ctx->top_pipe == NULL) {
+
+ /* Lock otg_master_update to process all pipes associated with
+ * this OTG. This is done only once.
+ */
+ /* Watermarks apply to all pipes */
+ program_watermarks(dc->hwseq, &context->bw.dcn.watermarks, ref_clk_mhz);
+
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after watermark update */
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->stream_res.tg->dlg_otg_param.vready_offset = pipe_ctx->pipe_dlg_param.vready_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vstartup_start = pipe_ctx->pipe_dlg_param.vstartup_start;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_offset = pipe_ctx->pipe_dlg_param.vupdate_offset;
+ pipe_ctx->stream_res.tg->dlg_otg_param.vupdate_width = pipe_ctx->pipe_dlg_param.vupdate_width;
+ pipe_ctx->stream_res.tg->dlg_otg_param.signal = pipe_ctx->stream->signal;
+
+ pipe_ctx->stream_res.tg->funcs->program_global_sync(
+ pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, !is_pipe_tree_visible(pipe_ctx));
+ }
+
+ if (pipe_ctx->plane_state != NULL) {
+ struct dc_cursor_position position = { 0 };
+ struct pipe_ctx *cur_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx];
+
+ dcn10_power_on_fe(dc, pipe_ctx, context);
+
+ /* temporary DCN1 workaround:
+ * the watermark update requires a toggle after the a/b/c/d sets are
+ * programmed. If the hubp is power gated the wm value doesn't get
+ * propagated to the hubp, so toggle after un-gating to ensure the wm
+ * reaches the hubp.
+ *
+ * final solution: we need to get the SMU to do the toggle, since
+ * DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST is owned by the SMU and we
+ * should not have both driver and FW accessing the same register.
+ */
+ toggle_watermark_change_req(dc->hwseq);
+
+ update_dchubp_dpp(dc, pipe_ctx, context);
+
+ /* TODO: this is a hack workaround for switching from MPO to pipe split */
+ dc_stream_set_cursor_position(pipe_ctx->stream, &position);
+
+ dc_stream_set_cursor_attributes(pipe_ctx->stream,
+ &pipe_ctx->stream->cursor_attributes);
+
+ if (cur_pipe_ctx->plane_state != pipe_ctx->plane_state) {
+ dc->hwss.set_input_transfer_func(
+ pipe_ctx, pipe_ctx->plane_state);
+ dc->hwss.set_output_transfer_func(
+ pipe_ctx, pipe_ctx->stream);
+ }
+ }
+
+ if (dc->debug.sanity_checks) {
+ /* pstate stuck check after each pipe is programmed */
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
+ program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
+}
+
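+/* Translate the current DCN clock state into a dm_pp display configuration
+ * and hand it to powerplay, but only if it changed since the last call.
+ */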
+static void dcn10_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->all_displays_in_sync = false;/*todo*/
+ pp_display_cfg->nb_pstate_switch_disable = false;
+ pp_display_cfg->min_engine_clock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
+ pp_display_cfg->min_memory_clock_khz = context->bw.dcn.cur_clk.fclk_khz;
+ pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz;
+ pp_display_cfg->avail_mclk_switch_time_us =
+ context->bw.dcn.cur_clk.dram_ccm_us > 0 ? context->bw.dcn.cur_clk.dram_ccm_us : 0;
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us =
+ context->bw.dcn.cur_clk.min_active_dram_ccm_us > 0 ? context->bw.dcn.cur_clk.min_active_dram_ccm_us : 0;
+ pp_display_cfg->min_dcfclock_khz = context->bw.dcn.cur_clk.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = context->bw.dcn.cur_clk.dispclk_khz;
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
+ struct dm_pp_display_configuration)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+
+ dc->prev_display_config = *pp_display_cfg;
+}
+
+static void optimize_shared_resources(struct dc *dc)
+{
+ if (dc->current_state->stream_count == 0) {
+ apply_DEGVIDCN10_253_wa(dc);
+ /* S0i2 message */
+ dcn10_pplib_apply_display_requirements(dc, dc->current_state);
+ }
+
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+}
+
+static void ready_shared_resources(struct dc *dc, struct dc_state *context)
+{
+ if (dc->current_state->stream_count == 0 &&
+ !dc->debug.disable_stutter)
+ undo_DEGVIDCN10_253_wa(dc);
+
+ /* S0i2 message */
+ if (dc->current_state->stream_count == 0 &&
+ context->stream_count != 0)
+ dcn10_pplib_apply_display_requirements(dc, context);
+}
+
+static void dcn10_apply_ctx_for_surface(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context)
+{
+ int i, be_idx;
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ be_idx = -1;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (stream == context->res_ctx.pipe_ctx[i].stream) {
+ be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
+ break;
+ }
+ }
+
+ ASSERT(be_idx != -1);
+
+ if (num_planes == 0) {
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (old_pipe_ctx->stream_res.tg && old_pipe_ctx->stream_res.tg->inst == be_idx) {
+ old_pipe_ctx->stream_res.tg->funcs->set_blank(old_pipe_ctx->stream_res.tg, true);
+ dcn10_power_down_fe(dc, old_pipe_ctx->pipe_idx);
+ }
+ }
+ return;
+ }
+
+ /* reset unused mpcc */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
+ continue;
+
+ /*
+ * Power gate reused pipes that are not yet power gated.
+ * Fairly hacky right now; opp_id is used as the indicator.
+ */
+
+ if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
+ if (pipe_ctx->plane_res.hubp->opp_id != 0xf && pipe_ctx->stream_res.tg->inst == be_idx) {
+ dcn10_power_down_fe(dc, pipe_ctx->pipe_idx);
+ /*
+ * Powering down the front end unlocks when calling reset, so we
+ * need to lock it back here. Messy; needs rework.
+ */
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+ }
+ }
+
+
+ if ((!pipe_ctx->plane_state && old_pipe_ctx->plane_state)
+ || (!pipe_ctx->stream && old_pipe_ctx->stream)) {
+ if (old_pipe_ctx->stream_res.tg->inst != be_idx)
+ continue;
+
+ if (!old_pipe_ctx->top_pipe) {
+ ASSERT(0);
+ continue;
+ }
+
+ /* reset mpc */
+ dc->res_pool->mpc->funcs->remove(
+ dc->res_pool->mpc,
+ &(old_pipe_ctx->stream_res.opp->mpc_tree),
+ old_pipe_ctx->stream_res.opp->inst,
+ old_pipe_ctx->pipe_idx);
+ old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[old_pipe_ctx->plane_res.hubp->mpcc_id] = true;
+
+ /*dm_logger_write(dc->ctx->logger, LOG_ERROR,
+ "[debug_mpo: apply_ctx disconnect pending on mpcc %d]\n",
+ old_pipe_ctx->mpcc->inst);*/
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+
+ old_pipe_ctx->top_pipe = NULL;
+ old_pipe_ctx->bottom_pipe = NULL;
+ old_pipe_ctx->plane_state = NULL;
+ old_pipe_ctx->stream = NULL;
+
+ dm_logger_write(dc->ctx->logger, LOG_DC,
+ "Reset mpcc for pipe %d\n",
+ old_pipe_ctx->pipe_idx);
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ /* looking for top pipe to program */
+ if (!pipe_ctx->top_pipe)
+ program_all_pipe_in_tree(dc, pipe_ctx, context);
+ }
+
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\n============== Watermark parameters ==============\n"
+ "a.urgent_ns: %d \n"
+ "a.cstate_enter_plus_exit: %d \n"
+ "a.cstate_exit: %d \n"
+ "a.pstate_change: %d \n"
+ "a.pte_meta_urgent: %d \n"
+ "b.urgent_ns: %d \n"
+ "b.cstate_enter_plus_exit: %d \n"
+ "b.cstate_exit: %d \n"
+ "b.pstate_change: %d \n"
+ "b.pte_meta_urgent: %d \n",
+ context->bw.dcn.watermarks.a.urgent_ns,
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.a.pte_meta_urgent_ns,
+ context->bw.dcn.watermarks.b.urgent_ns,
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.b.pte_meta_urgent_ns
+ );
+ dm_logger_write(dc->ctx->logger, LOG_BANDWIDTH_CALCS,
+ "\nc.urgent_ns: %d \n"
+ "c.cstate_enter_plus_exit: %d \n"
+ "c.cstate_exit: %d \n"
+ "c.pstate_change: %d \n"
+ "c.pte_meta_urgent: %d \n"
+ "d.urgent_ns: %d \n"
+ "d.cstate_enter_plus_exit: %d \n"
+ "d.cstate_exit: %d \n"
+ "d.pstate_change: %d \n"
+ "d.pte_meta_urgent: %d \n"
+ "========================================================\n",
+ context->bw.dcn.watermarks.c.urgent_ns,
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.c.pte_meta_urgent_ns,
+ context->bw.dcn.watermarks.d.urgent_ns,
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns,
+ context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns,
+ context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns,
+ context->bw.dcn.watermarks.d.pte_meta_urgent_ns
+ );
+
+ if (dc->debug.sanity_checks)
+ verify_allow_pstate_change_high(dc->hwseq);
+}
+
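+/* Apply the clocks produced by the DCN bandwidth calculation. Increases
+ * are applied unconditionally; decreases only when decrease_allowed is
+ * set. DISPCLK goes through the display clock object, while the DCF/F
+ * clock minimums are sent to the SMU via pp_smu, and the resulting state
+ * is reported to powerplay.
+ */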
+static void dcn10_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ struct pp_smu_display_requirement_rv *smu_req_cur =
+ &dc->res_pool->pp_smu_req;
+ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
+ if (decrease_allowed || context->bw.dcn.calc_clk.dispclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dispclk_khz) {
+ dc->res_pool->display_clock->funcs->set_clock(
+ dc->res_pool->display_clock,
+ context->bw.dcn.calc_clk.dispclk_khz);
+ dc->current_state->bw.dcn.cur_clk.dispclk_khz =
+ context->bw.dcn.calc_clk.dispclk_khz;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_khz
+ > dc->current_state->bw.dcn.cur_clk.dcfclk_khz) {
+ smu_req.hard_min_dcefclk_khz =
+ context->bw.dcn.calc_clk.dcfclk_khz;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.fclk_khz
+ > dc->current_state->bw.dcn.cur_clk.fclk_khz) {
+ smu_req.hard_min_fclk_khz = context->bw.dcn.calc_clk.fclk_khz;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz
+ > dc->current_state->bw.dcn.cur_clk.dcfclk_deep_sleep_khz) {
+ dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz =
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+ context->bw.dcn.cur_clk.dcfclk_deep_sleep_khz =
+ context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz;
+ }
+
+ smu_req.display_count = context->stream_count;
+
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+ *smu_req_cur = smu_req;
+
+ /* A decrease in frequency is an increase in period, hence the opposite comparison for dram_ccm */
+ if (decrease_allowed || context->bw.dcn.calc_clk.dram_ccm_us
+ < dc->current_state->bw.dcn.cur_clk.dram_ccm_us) {
+ dc->current_state->bw.dcn.calc_clk.dram_ccm_us =
+ context->bw.dcn.calc_clk.dram_ccm_us;
+ context->bw.dcn.cur_clk.dram_ccm_us =
+ context->bw.dcn.calc_clk.dram_ccm_us;
+ }
+ if (decrease_allowed || context->bw.dcn.calc_clk.min_active_dram_ccm_us
+ < dc->current_state->bw.dcn.cur_clk.min_active_dram_ccm_us) {
+ dc->current_state->bw.dcn.calc_clk.min_active_dram_ccm_us =
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us;
+ context->bw.dcn.cur_clk.min_active_dram_ccm_us =
+ context->bw.dcn.calc_clk.min_active_dram_ccm_us;
+ }
+ dcn10_pplib_apply_display_requirements(dc, context);
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ /* TODO: this function needs fixing; it is not doing the right thing here */
+}
+
+static void set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, int vmin, int vmax)
+{
+ int i = 0;
+ struct drr_params params = {0};
+
+ params.vertical_total_max = vmax;
+ params.vertical_total_min = vmin;
+
+ /* TODO: If multiple pipes are to be supported, you need
+ * some GSL stuff
+ */
+ for (i = 0; i < num_pipes; i++) {
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(pipe_ctx[i]->stream_res.tg, &params);
+ }
+}
+
+static void get_position(struct pipe_ctx **pipe_ctx,
+ int num_pipes,
+ struct crtc_position *position)
+{
+ int i = 0;
+
+ /* TODO: handle pipes > 1
+ */
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
+}
+
+static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events)
+{
+ unsigned int i;
+ unsigned int value = 0;
+
+ if (events->surface_update)
+ value |= 0x80;
+ if (events->cursor_update)
+ value |= 0x2;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->
+ set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
+}
+
+static void set_plane_config(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct resource_context *res_ctx)
+{
+ /* TODO */
+ program_gamut_remap(pipe_ctx);
+}
+
+static void dcn10_config_stereo_parameters(
+ struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
+{
+ enum view_3d_format view_format = stream->view_format;
+ enum dc_timing_3d_format timing_3d_format =\
+ stream->timing.timing_3d_format;
+ bool non_stereo_timing = false;
+
+ if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
+ timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
+ timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
+ non_stereo_timing = true;
+
+ if (non_stereo_timing == false &&
+ view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
+
+ flags->PROGRAM_STEREO = 1;
+ flags->PROGRAM_POLARITY = 1;
+ if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
+ timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
+ timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
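+			/* presumably passive DP-to-VGA/DVI/HDMI converters cannot
+			 * forward the in-band/sideband FA stereo sync, so it is
+			 * disabled for those dongle types
+			 */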
+			enum display_dongle_type dongle =
+					stream->sink->link->ddc->dongle_type;
+ if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ flags->DISABLE_STEREO_DP_SYNC = 1;
+ }
+		flags->RIGHT_EYE_POLARITY =
+				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
+ if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
+ flags->FRAME_PACKED = 1;
+ }
+}
+
+static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
+{
+ struct crtc_stereo_flags flags = { 0 };
+ struct dc_stream_state *stream = pipe_ctx->stream;
+
+ dcn10_config_stereo_parameters(stream, &flags);
+
+ pipe_ctx->stream_res.opp->funcs->opp_set_stereo_polarity(
+ pipe_ctx->stream_res.opp,
+		flags.PROGRAM_STEREO == 1,
+		stream->timing.flags.RIGHT_EYE_3D_POLARITY == 1);
+
+ pipe_ctx->stream_res.tg->funcs->program_stereo(
+ pipe_ctx->stream_res.tg,
+ &stream->timing,
+ &flags);
+}
+
+static void dcn10_wait_for_mpcc_disconnect(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx)
+{
+ int i;
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+ if (!pipe_ctx->stream_res.opp)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i]) {
+ res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, i);
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[i] = false;
+ res_pool->hubps[i]->funcs->set_blank(res_pool->hubps[i], true);
+ }
+ }
+
+ if (dc->debug.sanity_checks) {
+ verify_allow_pstate_change_high(dc->hwseq);
+ }
+
+}
+
+static bool dcn10_dummy_display_power_gating(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating)
+{
+ return true;
+}
+
+void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (plane_state == NULL)
+ return;
+
+ plane_state->status.is_flip_pending =
+ pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
+ pipe_ctx->plane_res.hubp);
+
+ plane_state->status.current_address = pipe_ctx->plane_res.hubp->current_address;
+ if (pipe_ctx->plane_res.hubp->current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ tg->funcs->is_stereo_left_eye) {
+ plane_state->status.is_right_eye =
+ !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
+ }
+}
+
+static const struct hw_sequencer_funcs dcn10_funcs = {
+ .program_gamut_remap = program_gamut_remap,
+ .program_csc_matrix = program_csc_matrix,
+ .init_hw = dcn10_init_hw,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .set_plane_config = set_plane_config,
+ .update_plane_addr = dcn10_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .set_input_transfer_func = dcn10_set_input_transfer_func,
+ .set_output_transfer_func = dcn10_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .update_info_frame = dce110_update_info_frame,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dce110_unblank_stream,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .power_down_front_end = dcn10_power_down_fe,
+ .power_on_front_end = dcn10_power_on_fe,
+ .pipe_control_lock = dcn10_pipe_control_lock,
+ .set_bandwidth = dcn10_set_bandwidth,
+ .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
+ .prog_pixclk_crtc_otg = dcn10_prog_pixclk_crtc_otg,
+ .set_drr = set_drr,
+ .get_position = get_position,
+ .set_static_screen_control = set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .ready_shared_resources = ready_shared_resources,
+ .optimize_shared_resources = optimize_shared_resources,
+ .edp_backlight_control = hwss_edp_backlight_control,
+ .edp_power_control = hwss_edp_power_control
+};
+
+
+void dcn10_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn10_funcs;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
new file mode 100644
index 000000000000..ca53dc1cc19b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN10_H__
+#define __DC_HWSS_DCN10_H__
+
+#include "core_types.h"
+
+struct dc;
+
+void dcn10_hw_sequencer_construct(struct dc *dc);
+extern void fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
new file mode 100644
index 000000000000..08db1e6b5166
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn10_ipp.h"
+#include "reg_helper.h"
+
+#define REG(reg) \
+ (ippn10->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ ippn10->ipp_shift->field_name, ippn10->ipp_mask->field_name
+
+#define CTX \
+ ippn10->base.ctx
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static void dcn10_ipp_destroy(struct input_pixel_processor **ipp)
+{
+ kfree(TO_DCN10_IPP(*ipp));
+ *ipp = NULL;
+}
+
+static const struct ipp_funcs dcn10_ipp_funcs = {
+ .ipp_destroy = dcn10_ipp_destroy
+};
+
+void dcn10_ipp_construct(
+ struct dcn10_ipp *ippn10,
+ struct dc_context *ctx,
+ int inst,
+ const struct dcn10_ipp_registers *regs,
+ const struct dcn10_ipp_shift *ipp_shift,
+ const struct dcn10_ipp_mask *ipp_mask)
+{
+ ippn10->base.ctx = ctx;
+ ippn10->base.inst = inst;
+ ippn10->base.funcs = &dcn10_ipp_funcs;
+
+ ippn10->regs = regs;
+ ippn10->ipp_shift = ipp_shift;
+ ippn10->ipp_mask = ipp_mask;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
new file mode 100644
index 000000000000..d7b5bd20352a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DCN10_IPP_H_
+#define _DCN10_IPP_H_
+
+#include "ipp.h"
+
+#define TO_DCN10_IPP(ipp)\
+ container_of(ipp, struct dcn10_ipp, base)
+
+#define IPP_REG_LIST_DCN(id) \
+ SRI(FORMAT_CONTROL, CNVC_CFG, id), \
+ SRI(DPP_CONTROL, DPP_TOP, id), \
+ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \
+ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR0, CNVC_CUR, id), \
+ SRI(CURSOR0_COLOR1, CNVC_CUR, id)
+
+#define IPP_REG_LIST_DCN10(id) \
+ IPP_REG_LIST_DCN(id), \
+ SRI(CURSOR_SETTINS, HUBPREQ, id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR, id), \
+ SRI(CURSOR_SIZE, CURSOR, id), \
+ SRI(CURSOR_CONTROL, CURSOR, id), \
+ SRI(CURSOR_POSITION, CURSOR, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR, id)
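+
+/* CURSOR_SETTINS (sic) above is assumed to follow the register spelling used
+ * in the hw register headers
+ */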
+
+#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+
+#define IPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define IPP_MASK_SH_LIST_DCN(mask_sh) \
+ IPP_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, ALPHA_EN, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \
+ IPP_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh)
+
+#define IPP_MASK_SH_LIST_DCN10(mask_sh) \
+ IPP_MASK_SH_LIST_DCN(mask_sh),\
+ IPP_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ IPP_SF(HUBPREQ0_CURSOR_SETTINS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(CURSOR0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ IPP_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh)
+
+#define IPP_DCN10_REG_FIELD_LIST(type) \
+ type CNVC_SURFACE_PIXEL_FORMAT; \
+ type CNVC_BYPASS; \
+ type ALPHA_EN; \
+ type FORMAT_EXPANSION_MODE; \
+ type CURSOR0_DST_Y_OFFSET; \
+ type CURSOR0_CHUNK_HDL_ADJUST; \
+ type CUR0_MODE; \
+ type CUR0_COLOR0; \
+ type CUR0_COLOR1; \
+ type CUR0_EXPANSION_MODE; \
+ type CURSOR_SURFACE_ADDRESS_HIGH; \
+ type CURSOR_SURFACE_ADDRESS; \
+ type CURSOR_WIDTH; \
+ type CURSOR_HEIGHT; \
+ type CURSOR_MODE; \
+ type CURSOR_2X_MAGNIFY; \
+ type CURSOR_PITCH; \
+ type CURSOR_LINES_PER_CHUNK; \
+ type CURSOR_ENABLE; \
+ type CUR0_ENABLE; \
+ type CURSOR_X_POSITION; \
+ type CURSOR_Y_POSITION; \
+ type CURSOR_HOT_SPOT_X; \
+ type CURSOR_HOT_SPOT_Y; \
+ type CURSOR_DST_X_OFFSET; \
+ type OUTPUT_FP
+
+struct dcn10_ipp_shift {
+ IPP_DCN10_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn10_ipp_mask {
+ IPP_DCN10_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_ipp_registers {
+ uint32_t DPP_CONTROL;
+ uint32_t CURSOR_SETTINS;
+ uint32_t CURSOR_SETTINGS;
+ uint32_t CNVC_SURFACE_PIXEL_FORMAT;
+ uint32_t CURSOR0_CONTROL;
+ uint32_t CURSOR0_COLOR0;
+ uint32_t CURSOR0_COLOR1;
+ uint32_t FORMAT_CONTROL;
+ uint32_t CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR_SIZE;
+ uint32_t CURSOR_CONTROL;
+ uint32_t CURSOR_POSITION;
+ uint32_t CURSOR_HOT_SPOT;
+ uint32_t CURSOR_DST_OFFSET;
+};
+
+struct dcn10_ipp {
+ struct input_pixel_processor base;
+
+ const struct dcn10_ipp_registers *regs;
+ const struct dcn10_ipp_shift *ipp_shift;
+ const struct dcn10_ipp_mask *ipp_mask;
+
+ struct dc_cursor_attributes curs_attr;
+};
+
+void dcn10_ipp_construct(struct dcn10_ipp *ippn10,
+ struct dc_context *ctx,
+ int inst,
+ const struct dcn10_ipp_registers *regs,
+ const struct dcn10_ipp_shift *ipp_shift,
+ const struct dcn10_ipp_mask *ipp_mask);
+
+#endif /* _DCN10_IPP_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
new file mode 100644
index 000000000000..76573e1f5b01
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn10_mpc.h"
+#include "dc.h"
+#include "mem_input.h"
+
+#define REG(reg)\
+ mpc10->mpc_regs->reg
+
+#define CTX \
+ mpc10->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mpc10->mpc_shift->field_name, mpc10->mpc_mask->field_name
+
+#define MODE_TOP_ONLY 1
+#define MODE_BLEND 3
+#define BLND_PP_ALPHA 0
+#define BLND_GLOBAL_ALPHA 2
+
+
+static void mpc10_set_bg_color(
+ struct dcn10_mpc *mpc10,
+ struct tg_color *bg_color,
+ int id)
+{
+ /* mpc color is 12 bit. tg_color is 10 bit */
+ /* todo: might want to use 16 bit to represent color and have each
+ * hw block translate to correct color depth.
+ */
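+	/* e.g. a full-scale 10-bit value of 0x3FF becomes 0xFFC after the shift */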
+ uint32_t bg_r_cr = bg_color->color_r_cr << 2;
+ uint32_t bg_g_y = bg_color->color_g_y << 2;
+ uint32_t bg_b_cb = bg_color->color_b_cb << 2;
+
+ REG_SET(MPCC_BG_R_CR[id], 0,
+ MPCC_BG_R_CR, bg_r_cr);
+ REG_SET(MPCC_BG_G_Y[id], 0,
+ MPCC_BG_G_Y, bg_g_y);
+ REG_SET(MPCC_BG_B_CB[id], 0,
+ MPCC_BG_B_CB, bg_b_cb);
+}
+
+void mpc10_assert_idle_mpcc(struct mpc *mpc, int id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ ASSERT(!(mpc10->mpcc_in_use_mask & 1 << id));
+ REG_WAIT(MPCC_STATUS[id],
+ MPCC_IDLE, 1,
+ 1, 100000);
+}
+
+static int mpc10_get_idle_mpcc_id(struct dcn10_mpc *mpc10)
+{
+ int i;
+ int last_free_mpcc_id = -1;
+
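+	/* prefer an unused mpcc that is already idle; if none is idle, fall
+	 * back to the last unused one and wait for it below
+	 */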
+ for (i = 0; i < mpc10->num_mpcc; i++) {
+ uint32_t is_idle = 0;
+
+ if (mpc10->mpcc_in_use_mask & 1 << i)
+ continue;
+
+ last_free_mpcc_id = i;
+ REG_GET(MPCC_STATUS[i], MPCC_IDLE, &is_idle);
+ if (is_idle)
+ return i;
+ }
+
+	/* This assert should never trigger; if it does, we have an mpcc leak */
+ ASSERT(last_free_mpcc_id != -1);
+
+ mpc10_assert_idle_mpcc(&mpc10->base, last_free_mpcc_id);
+ return last_free_mpcc_id;
+}
+
+static void mpc10_assert_mpcc_idle_before_connect(struct dcn10_mpc *mpc10, int id)
+{
+ unsigned int top_sel, mpc_busy, mpc_idle;
+
+ REG_GET(MPCC_TOP_SEL[id],
+ MPCC_TOP_SEL, &top_sel);
+
+ if (top_sel == 0xf) {
+ REG_GET_2(MPCC_STATUS[id],
+ MPCC_BUSY, &mpc_busy,
+ MPCC_IDLE, &mpc_idle);
+
+ ASSERT(mpc_busy == 0);
+ ASSERT(mpc_idle == 1);
+ }
+}
+
+void mpc10_mpcc_remove(
+ struct mpc *mpc,
+ struct mpc_tree_cfg *tree_cfg,
+ int opp_id,
+ int dpp_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_id, z_idx;
+
+ /* find z_idx for the dpp to be removed */
+ for (z_idx = 0; z_idx < tree_cfg->num_pipes; z_idx++)
+ if (tree_cfg->dpp[z_idx] == dpp_id)
+ break;
+
+ if (z_idx == tree_cfg->num_pipes) {
+		/* on resume from S3/S4, remove the mpcc left over by the bios */
+ REG_SET(MPCC_OPP_ID[dpp_id], 0,
+ MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_TOP_SEL[dpp_id], 0,
+ MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[dpp_id], 0,
+ MPCC_BOT_SEL, 0xf);
+ return;
+ }
+
+ mpcc_id = tree_cfg->mpcc[z_idx];
+
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0,
+ MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
+ MPCC_TOP_SEL, 0xf);
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+
+ if (z_idx > 0) {
+ int top_mpcc_id = tree_cfg->mpcc[z_idx - 1];
+
+ if (z_idx + 1 < tree_cfg->num_pipes)
+ /* mpcc to be removed is in the middle of the tree */
+ REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
+ MPCC_BOT_SEL, tree_cfg->mpcc[z_idx + 1]);
+ else {
+ /* mpcc to be removed is at the bottom of the tree */
+ REG_SET(MPCC_BOT_SEL[top_mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+ REG_UPDATE(MPCC_CONTROL[top_mpcc_id],
+ MPCC_MODE, MODE_TOP_ONLY);
+ }
+ } else if (tree_cfg->num_pipes > 1)
+ /* mpcc to be removed is at the top of the tree */
+ REG_SET(MUX[opp_id], 0,
+ MPC_OUT_MUX, tree_cfg->mpcc[z_idx + 1]);
+ else
+ /* mpcc to be removed is the only one in the tree */
+ REG_SET(MUX[opp_id], 0, MPC_OUT_MUX, 0xf);
+
+ /* mark this mpcc as not in use */
+ mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
+ tree_cfg->num_pipes--;
+ for (; z_idx < tree_cfg->num_pipes; z_idx++) {
+ tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx + 1];
+ tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx + 1];
+ }
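+	/* poison the entries that fell off the end of the tree so any stale
+	 * use of them is easy to spot
+	 */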
+ tree_cfg->dpp[tree_cfg->num_pipes] = 0xdeadbeef;
+ tree_cfg->mpcc[tree_cfg->num_pipes] = 0xdeadbeef;
+}
+
+static void mpc10_add_to_tree_cfg(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg,
+ int mpcc_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_mode = MODE_TOP_ONLY;
+ int position = cfg->z_index;
+ struct mpc_tree_cfg *tree_cfg = cfg->tree_cfg;
+ int alpha_blnd_mode = cfg->per_pixel_alpha ?
+ BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
+ int z_idx;
+
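+	/* the mpc tree is a linear blend cascade: each mpcc takes its dpp on
+	 * TOP_SEL and the next mpcc down on BOT_SEL, and the top-most mpcc
+	 * (tree_cfg->mpcc[0]) drives the opp through the MPC_OUT mux
+	 */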
+ REG_SET(MPCC_OPP_ID[mpcc_id], 0,
+ MPCC_OPP_ID, cfg->opp_id);
+
+ REG_SET(MPCC_TOP_SEL[mpcc_id], 0,
+ MPCC_TOP_SEL, cfg->dpp_id);
+
+ if (position == 0) {
+ /* idle dpp/mpcc is added to the top layer of tree */
+
+ if (tree_cfg->num_pipes > 0) {
+ /* get instance of previous top mpcc */
+ int prev_top_mpcc_id = tree_cfg->mpcc[0];
+
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, prev_top_mpcc_id);
+ mpcc_mode = MODE_BLEND;
+ }
+
+		/* the opp gets its new output from the newly added mpcc */
+ REG_SET(MUX[cfg->opp_id], 0, MPC_OUT_MUX, mpcc_id);
+
+ } else if (position == tree_cfg->num_pipes) {
+ /* idle dpp/mpcc is added to the bottom layer of tree */
+
+ /* get instance of previous bottom mpcc, set to middle layer */
+ int prev_bot_mpcc_id = tree_cfg->mpcc[tree_cfg->num_pipes - 1];
+
+ REG_SET(MPCC_BOT_SEL[prev_bot_mpcc_id], 0,
+ MPCC_BOT_SEL, mpcc_id);
+ REG_UPDATE(MPCC_CONTROL[prev_bot_mpcc_id],
+ MPCC_MODE, MODE_BLEND);
+
+		/* mpcc_id becomes the new bottom mpcc */
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, 0xf);
+
+ } else {
+ /* idle dpp/mpcc is added to middle of tree */
+ int above_mpcc_id = tree_cfg->mpcc[position - 1];
+ int below_mpcc_id = tree_cfg->mpcc[position];
+
+		/* the mpcc above the new mpcc_id gets a new bottom mux */
+ REG_SET(MPCC_BOT_SEL[above_mpcc_id], 0,
+ MPCC_BOT_SEL, mpcc_id);
+ REG_UPDATE(MPCC_CONTROL[above_mpcc_id],
+ MPCC_MODE, MODE_BLEND);
+
+		/* mpcc_id's bottom mux takes its input from the mpcc below */
+ REG_SET(MPCC_BOT_SEL[mpcc_id], 0,
+ MPCC_BOT_SEL, below_mpcc_id);
+ mpcc_mode = MODE_BLEND;
+ }
+
+ REG_SET_4(MPCC_CONTROL[mpcc_id], 0xffffffff,
+ MPCC_MODE, mpcc_mode,
+ MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha,
+ MPCC_BLND_ACTIVE_OVERLAP_ONLY, false);
+
+ /* update mpc_tree_cfg with new mpcc */
+ for (z_idx = tree_cfg->num_pipes; z_idx > position; z_idx--) {
+ tree_cfg->dpp[z_idx] = tree_cfg->dpp[z_idx - 1];
+ tree_cfg->mpcc[z_idx] = tree_cfg->mpcc[z_idx - 1];
+ }
+ tree_cfg->dpp[position] = cfg->dpp_id;
+ tree_cfg->mpcc[position] = mpcc_id;
+ tree_cfg->num_pipes++;
+}
+
+int mpc10_mpcc_add(struct mpc *mpc, struct mpcc_cfg *cfg)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_id, z_idx;
+
+ ASSERT(cfg->z_index < mpc10->num_mpcc);
+
+	/* check if the dpp already exists in the mpc tree */
+ for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
+ if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
+ break;
+ if (z_idx == cfg->tree_cfg->num_pipes) {
+ ASSERT(cfg->z_index <= cfg->tree_cfg->num_pipes);
+ mpcc_id = mpc10_get_idle_mpcc_id(mpc10);
+
+ /*
+ * TODO: remove hack
+ * Note: currently there is a bug in init_hw such that
+ * on resume from hibernate, BIOS sets up MPCC0, and
+ * we do mpcc_remove but the mpcc cannot go to idle
+		 * after remove. This causes us to pick mpcc1 here,
+		 * which causes a pstate hang for a yet unknown reason.
+ */
+ mpcc_id = cfg->dpp_id;
+ /* end hack*/
+
+ ASSERT(!(mpc10->mpcc_in_use_mask & 1 << mpcc_id));
+
+ if (mpc->ctx->dc->debug.sanity_checks)
+ mpc10_assert_mpcc_idle_before_connect(mpc10, mpcc_id);
+ } else {
+ ASSERT(cfg->z_index < cfg->tree_cfg->num_pipes);
+ mpcc_id = cfg->tree_cfg->mpcc[z_idx];
+ mpc10_mpcc_remove(mpc, cfg->tree_cfg, cfg->opp_id, cfg->dpp_id);
+ }
+
+ /* add dpp/mpcc pair to mpc_tree_cfg and update mpcc registers */
+ mpc10_add_to_tree_cfg(mpc, cfg, mpcc_id);
+
+ /* set background color */
+ mpc10_set_bg_color(mpc10, &cfg->black_color, mpcc_id);
+
+ /* mark this mpcc as in use */
+ mpc10->mpcc_in_use_mask |= 1 << mpcc_id;
+
+ return mpcc_id;
+}
+
+void mpc10_update_blend_mode(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ int mpcc_id, z_idx;
+ int alpha_blnd_mode = cfg->per_pixel_alpha ?
+ BLND_PP_ALPHA : BLND_GLOBAL_ALPHA;
+
+ /* find z_idx for the dpp that requires blending mode update*/
+ for (z_idx = 0; z_idx < cfg->tree_cfg->num_pipes; z_idx++)
+ if (cfg->tree_cfg->dpp[z_idx] == cfg->dpp_id)
+ break;
+
+ ASSERT(z_idx < cfg->tree_cfg->num_pipes);
+ mpcc_id = cfg->tree_cfg->mpcc[z_idx];
+
+ REG_UPDATE_2(MPCC_CONTROL[mpcc_id],
+ MPCC_ALPHA_BLND_MODE, alpha_blnd_mode,
+ MPCC_ALPHA_MULTIPLIED_MODE, cfg->pre_multiplied_alpha);
+}
+
+const struct mpc_funcs dcn10_mpc_funcs = {
+ .add = mpc10_mpcc_add,
+ .remove = mpc10_mpcc_remove,
+ .wait_for_idle = mpc10_assert_idle_mpcc,
+ .update_blend_mode = mpc10_update_blend_mode,
+};
+
+void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
+ struct dc_context *ctx,
+ const struct dcn_mpc_registers *mpc_regs,
+ const struct dcn_mpc_shift *mpc_shift,
+ const struct dcn_mpc_mask *mpc_mask,
+ int num_mpcc)
+{
+ mpc10->base.ctx = ctx;
+
+ mpc10->base.funcs = &dcn10_mpc_funcs;
+
+ mpc10->mpc_regs = mpc_regs;
+ mpc10->mpc_shift = mpc_shift;
+ mpc10->mpc_mask = mpc_mask;
+
+ mpc10->mpcc_in_use_mask = 0;
+ mpc10->num_mpcc = num_mpcc;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
new file mode 100644
index 000000000000..683ce4aaa76e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -0,0 +1,138 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_DCN10_H__
+#define __DC_MPCC_DCN10_H__
+
+#include "mpc.h"
+
+#define TO_DCN10_MPC(mpc_base) \
+ container_of(mpc_base, struct dcn10_mpc, base)
+
+#define MAX_MPCC 6
+#define MAX_OPP 6
+
+#define MPC_COMMON_REG_LIST_DCN1_0(inst) \
+ SRII(MPCC_TOP_SEL, MPCC, inst),\
+ SRII(MPCC_BOT_SEL, MPCC, inst),\
+ SRII(MPCC_CONTROL, MPCC, inst),\
+ SRII(MPCC_STATUS, MPCC, inst),\
+ SRII(MPCC_OPP_ID, MPCC, inst),\
+ SRII(MPCC_BG_G_Y, MPCC, inst),\
+ SRII(MPCC_BG_R_CR, MPCC, inst),\
+	SRII(MPCC_BG_B_CB, MPCC, inst)
+
+#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
+ SRII(MUX, MPC_OUT, inst)
+
+#define MPC_COMMON_REG_VARIABLE_LIST \
+ uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
+ uint32_t MPCC_BOT_SEL[MAX_MPCC]; \
+ uint32_t MPCC_CONTROL[MAX_MPCC]; \
+ uint32_t MPCC_STATUS[MAX_MPCC]; \
+ uint32_t MPCC_OPP_ID[MAX_MPCC]; \
+ uint32_t MPCC_BG_G_Y[MAX_MPCC]; \
+ uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
+ uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
+ uint32_t MUX[MAX_OPP];
+
+#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+ SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
+ SF(MPCC0_MPCC_BOT_SEL, MPCC_BOT_SEL, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_MODE, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_BLND_MODE, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_ALPHA_MULTIPLIED_MODE, mask_sh),\
+ SF(MPCC0_MPCC_CONTROL, MPCC_BLND_ACTIVE_OVERLAP_ONLY, mask_sh),\
+ SF(MPCC0_MPCC_STATUS, MPCC_IDLE, mask_sh),\
+ SF(MPCC0_MPCC_STATUS, MPCC_BUSY, mask_sh),\
+ SF(MPCC0_MPCC_OPP_ID, MPCC_OPP_ID, mask_sh),\
+ SF(MPCC0_MPCC_BG_G_Y, MPCC_BG_G_Y, mask_sh),\
+ SF(MPCC0_MPCC_BG_R_CR, MPCC_BG_R_CR, mask_sh),\
+ SF(MPCC0_MPCC_BG_B_CB, MPCC_BG_B_CB, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+
+#define MPC_REG_FIELD_LIST(type) \
+ type MPCC_TOP_SEL;\
+ type MPCC_BOT_SEL;\
+ type MPCC_MODE;\
+ type MPCC_ALPHA_BLND_MODE;\
+ type MPCC_ALPHA_MULTIPLIED_MODE;\
+ type MPCC_BLND_ACTIVE_OVERLAP_ONLY;\
+ type MPCC_IDLE;\
+ type MPCC_BUSY;\
+ type MPCC_OPP_ID;\
+ type MPCC_BG_G_Y;\
+ type MPCC_BG_R_CR;\
+ type MPCC_BG_B_CB;\
+ type MPC_OUT_MUX;
+
+struct dcn_mpc_registers {
+ MPC_COMMON_REG_VARIABLE_LIST
+};
+
+struct dcn_mpc_shift {
+ MPC_REG_FIELD_LIST(uint8_t)
+};
+
+struct dcn_mpc_mask {
+ MPC_REG_FIELD_LIST(uint32_t)
+};
+
+struct dcn10_mpc {
+ struct mpc base;
+
+ int mpcc_in_use_mask;
+ int num_mpcc;
+ const struct dcn_mpc_registers *mpc_regs;
+ const struct dcn_mpc_shift *mpc_shift;
+ const struct dcn_mpc_mask *mpc_mask;
+};
+
+void dcn10_mpc_construct(struct dcn10_mpc *mpcc10,
+ struct dc_context *ctx,
+ const struct dcn_mpc_registers *mpc_regs,
+ const struct dcn_mpc_shift *mpc_shift,
+ const struct dcn_mpc_mask *mpc_mask,
+ int num_mpcc);
+
+int mpc10_mpcc_add(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg);
+
+void mpc10_mpcc_remove(
+ struct mpc *mpc,
+ struct mpc_tree_cfg *tree_cfg,
+ int opp_id,
+ int dpp_id);
+
+void mpc10_assert_idle_mpcc(
+ struct mpc *mpc,
+ int id);
+
+void mpc10_update_blend_mode(
+ struct mpc *mpc,
+ struct mpcc_cfg *cfg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
new file mode 100644
index 000000000000..a136f70b7a3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -0,0 +1,351 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn10_opp.h"
+#include "reg_helper.h"
+
+#define REG(reg) \
+ (oppn10->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ oppn10->opp_shift->field_name, oppn10->opp_mask->field_name
+
+#define CTX \
+ oppn10->base.ctx
+
+
+
+/************* FORMATTER ************/
+
+/**
+ * set_truncation
+ * 1) set truncation depth: 0 for 18 bpp or 1 for 24 bpp
+ * 2) enable truncation
+ * 3) note: HW removed 12-bit FMT support on DCE11 for power-saving reasons.
+ */
+static void set_truncation(
+ struct dcn10_opp *oppn10,
+ const struct bit_depth_reduction_params *params)
+{
+ REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
+ FMT_TRUNCATE_EN, params->flags.TRUNCATE_ENABLED,
+ FMT_TRUNCATE_DEPTH, params->flags.TRUNCATE_DEPTH,
+ FMT_TRUNCATE_MODE, params->flags.TRUNCATE_MODE);
+}
+
+static void set_spatial_dither(
+ struct dcn10_opp *oppn10,
+ const struct bit_depth_reduction_params *params)
+{
+ /*Disable spatial (random) dithering*/
+ REG_UPDATE_7(FMT_BIT_DEPTH_CONTROL,
+ FMT_SPATIAL_DITHER_EN, 0,
+ FMT_SPATIAL_DITHER_MODE, 0,
+ FMT_SPATIAL_DITHER_DEPTH, 0,
+ FMT_TEMPORAL_DITHER_EN, 0,
+ FMT_HIGHPASS_RANDOM_ENABLE, 0,
+ FMT_FRAME_RANDOM_ENABLE, 0,
+ FMT_RGB_RANDOM_ENABLE, 0);
+
+
+ /* only use FRAME_COUNTER_MAX if frameRandom == 1*/
+ if (params->flags.FRAME_RANDOM == 1) {
+ if (params->flags.SPATIAL_DITHER_DEPTH == 0 || params->flags.SPATIAL_DITHER_DEPTH == 1) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 15,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 2);
+ } else if (params->flags.SPATIAL_DITHER_DEPTH == 2) {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 3,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 1);
+ } else {
+ return;
+ }
+ } else {
+ REG_UPDATE_2(FMT_CONTROL,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, 0,
+ FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, 0);
+ }
+
+ /*Set seed for random values for
+ * spatial dithering for R,G,B channels*/
+
+ REG_SET(FMT_DITHER_RAND_R_SEED, 0,
+ FMT_RAND_R_SEED, params->r_seed_value);
+
+ REG_SET(FMT_DITHER_RAND_G_SEED, 0,
+ FMT_RAND_G_SEED, params->g_seed_value);
+
+ REG_SET(FMT_DITHER_RAND_B_SEED, 0,
+ FMT_RAND_B_SEED, params->b_seed_value);
+
+ /* FMT_OFFSET_R_Cr 31:16 0x0 Setting the zero
+ * offset for the R/Cr channel, lower 4LSB
+ * is forced to zeros. Typically set to 0
+ * RGB and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_G_Y 31:16 0x0 Setting the zero
+ * offset for the G/Y channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB
+ * and 0x80000 YCbCr.
+ */
+ /* FMT_OFFSET_B_Cb 31:16 0x0 Setting the zero
+ * offset for the B/Cb channel, lower 4LSB is
+ * forced to zeros. Typically set to 0 RGB and
+ * 0x80000 YCbCr.
+ */
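+	/* the offset registers described above are left at their defaults and
+	 * not programmed here
+	 */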
+
+ REG_UPDATE_6(FMT_BIT_DEPTH_CONTROL,
+ /*Enable spatial dithering*/
+ FMT_SPATIAL_DITHER_EN, params->flags.SPATIAL_DITHER_ENABLED,
+ /* Set spatial dithering mode
+			 * (default is Seed pattern AAAA...)
+ */
+ FMT_SPATIAL_DITHER_MODE, params->flags.SPATIAL_DITHER_MODE,
+ /*Set spatial dithering bit depth*/
+ FMT_SPATIAL_DITHER_DEPTH, params->flags.SPATIAL_DITHER_DEPTH,
+ /*Disable High pass filter*/
+ FMT_HIGHPASS_RANDOM_ENABLE, params->flags.HIGHPASS_RANDOM,
+ /*Reset only at startup*/
+ FMT_FRAME_RANDOM_ENABLE, params->flags.FRAME_RANDOM,
+ /*Set RGB data dithered with x^28+x^3+1*/
+ FMT_RGB_RANDOM_ENABLE, params->flags.RGB_RANDOM);
+}
+
+static void oppn10_program_bit_depth_reduction(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ set_truncation(oppn10, params);
+ set_spatial_dither(oppn10, params);
+ /* TODO
+ * set_temporal_dither(oppn10, params);
+ */
+}
+
+/**
+ * set_pixel_encoding
+ *
+ * Set Pixel Encoding
+ * 0: RGB 4:4:4 or YCbCr 4:4:4 or YOnly
+ * 1: YCbCr 4:2:2
+ * 2: YCbCr 4:2:0
+ */
+static void set_pixel_encoding(
+ struct dcn10_opp *oppn10,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ switch (params->pixel_encoding) {
+
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0);
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1);
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * Set Clamping
+ * 1) Set clamping format based on bpc - 0 for 6bpc (No clamping)
+ * 1 for 8 bpc
+ * 2 for 10 bpc
+ * 3 for 12 bpc
+ *                          7 for programmable
+ * 2) Enable clamp if Limited range requested
+ */
+static void opp_set_clamping(
+ struct dcn10_opp *oppn10,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 0,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+
+ switch (params->clamping_level) {
+ case CLAMPING_FULL_RANGE:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 0);
+ break;
+ case CLAMPING_LIMITED_RANGE_8BPC:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 1);
+ break;
+ case CLAMPING_LIMITED_RANGE_10BPC:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 2);
+
+ break;
+ case CLAMPING_LIMITED_RANGE_12BPC:
+ REG_UPDATE_2(FMT_CLAMP_CNTL,
+ FMT_CLAMP_DATA_EN, 1,
+ FMT_CLAMP_COLOR_FORMAT, 3);
+ break;
+ case CLAMPING_LIMITED_RANGE_PROGRAMMABLE:
+ /* TODO */
+ default:
+ break;
+ }
+
+}
+
+static void oppn10_set_dyn_expansion(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 0,
+ FMT_DYNAMIC_EXP_MODE, 0);
+
+ /*00 - 10-bit -> 12-bit dynamic expansion*/
+ /*01 - 8-bit -> 12-bit dynamic expansion*/
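+	/* dynamic expansion presumably scales 8-/10-bit pixel data up to the
+	 * 12-bit width used by the formatter
+	 */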
+ if (signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ signal == SIGNAL_TYPE_VIRTUAL) {
+ switch (color_dpth) {
+ case COLOR_DEPTH_888:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 1);
+ break;
+ case COLOR_DEPTH_101010:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+ FMT_DYNAMIC_EXP_EN, 1,
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ case COLOR_DEPTH_121212:
+ REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+				FMT_DYNAMIC_EXP_EN, 1, /* otherwise last two bits are zero */
+ FMT_DYNAMIC_EXP_MODE, 0);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void opp_program_clamping_and_pixel_encoding(
+ struct output_pixel_processor *opp,
+ const struct clamping_and_pixel_encoding_params *params)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ opp_set_clamping(oppn10, params);
+ set_pixel_encoding(oppn10, params);
+}
+
+static void oppn10_program_fmt(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ if (clamping->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ REG_UPDATE(FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, 0);
+
+ /* dithering is affected by <CrtcSourceSelect>, hence should be
+ * programmed afterwards */
+ oppn10_program_bit_depth_reduction(
+ opp,
+ fmt_bit_depth);
+
+ opp_program_clamping_and_pixel_encoding(
+ opp,
+ clamping);
+}
+
+
+
+static void oppn10_set_stereo_polarity(
+ struct output_pixel_processor *opp,
+ bool enable, bool rightEyePolarity)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ REG_UPDATE(FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, enable);
+}
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static void dcn10_opp_destroy(struct output_pixel_processor **opp)
+{
+ kfree(TO_DCN10_OPP(*opp));
+ *opp = NULL;
+}
+
+static struct opp_funcs dcn10_opp_funcs = {
+ .opp_set_dyn_expansion = oppn10_set_dyn_expansion,
+ .opp_program_fmt = oppn10_program_fmt,
+ .opp_program_bit_depth_reduction = oppn10_program_bit_depth_reduction,
+ .opp_set_stereo_polarity = oppn10_set_stereo_polarity,
+ .opp_destroy = dcn10_opp_destroy
+};
+
+void dcn10_opp_construct(struct dcn10_opp *oppn10,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn10_opp_registers *regs,
+ const struct dcn10_opp_shift *opp_shift,
+ const struct dcn10_opp_mask *opp_mask)
+{
+ int i;
+ oppn10->base.ctx = ctx;
+ oppn10->base.inst = inst;
+ oppn10->base.funcs = &dcn10_opp_funcs;
+
+ oppn10->base.mpc_tree.dpp[0] = inst;
+ oppn10->base.mpc_tree.mpcc[0] = inst;
+ oppn10->base.mpc_tree.num_pipes = 1;
+ for (i = 0; i < MAX_PIPES; i++)
+ oppn10->base.mpcc_disconnect_pending[i] = false;
+
+ oppn10->regs = regs;
+ oppn10->opp_shift = opp_shift;
+ oppn10->opp_mask = opp_mask;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
new file mode 100644
index 000000000000..790ce6014832
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h
@@ -0,0 +1,186 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCN10_H__
+#define __DC_OPP_DCN10_H__
+
+#include "opp.h"
+
+#define TO_DCN10_OPP(opp)\
+ container_of(opp, struct dcn10_opp, base)
+
+#define OPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define OPP_REG_LIST_DCN(id) \
+ SRI(FMT_BIT_DEPTH_CONTROL, FMT, id), \
+ SRI(FMT_CONTROL, FMT, id), \
+ SRI(FMT_DITHER_RAND_R_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_G_SEED, FMT, id), \
+ SRI(FMT_DITHER_RAND_B_SEED, FMT, id), \
+ SRI(FMT_CLAMP_CNTL, FMT, id), \
+ SRI(FMT_DYNAMIC_EXP_CNTL, FMT, id), \
+ SRI(FMT_MAP420_MEMORY_CONTROL, FMT, id)
+
+#define OPP_REG_LIST_DCN10(id) \
+ OPP_REG_LIST_DCN(id)
+
+#define OPP_MASK_SH_LIST_DCN(mask_sh) \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, mask_sh), \
+ OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \
+ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \
+ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \
+ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \
+ OPP_SF(FMT0_FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh), \
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_DATA_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_CLAMP_CNTL, FMT_CLAMP_COLOR_FORMAT, mask_sh), \
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_EN, mask_sh), \
+ OPP_SF(FMT0_FMT_DYNAMIC_EXP_CNTL, FMT_DYNAMIC_EXP_MODE, mask_sh), \
+ OPP_SF(FMT0_FMT_MAP420_MEMORY_CONTROL, FMT_MAP420MEM_PWR_FORCE, mask_sh)
+
+#define OPP_MASK_SH_LIST_DCN10(mask_sh) \
+ OPP_MASK_SH_LIST_DCN(mask_sh)
+
+#define OPP_DCN10_REG_FIELD_LIST(type) \
+ type DPG_EN; \
+ type DPG_MODE; \
+ type DPG_VRES; \
+ type DPG_HRES; \
+ type DPG_COLOUR0_R_CR; \
+ type DPG_COLOUR1_R_CR; \
+ type DPG_COLOUR0_B_CB; \
+ type DPG_COLOUR1_B_CB; \
+ type DPG_COLOUR0_G_Y; \
+ type DPG_COLOUR1_G_Y; \
+ type CM_OCSC_C11; \
+ type CM_OCSC_C12; \
+ type CM_OCSC_C13; \
+ type CM_OCSC_C14; \
+ type CM_OCSC_C21; \
+ type CM_OCSC_C22; \
+ type CM_OCSC_C23; \
+ type CM_OCSC_C24; \
+ type CM_OCSC_C31; \
+ type CM_OCSC_C32; \
+ type CM_OCSC_C33; \
+ type CM_OCSC_C34; \
+ type CM_COMB_C11; \
+ type CM_COMB_C12; \
+ type CM_COMB_C13; \
+ type CM_COMB_C14; \
+ type CM_COMB_C21; \
+ type CM_COMB_C22; \
+ type CM_COMB_C23; \
+ type CM_COMB_C24; \
+ type CM_COMB_C31; \
+ type CM_COMB_C32; \
+ type CM_COMB_C33; \
+ type CM_COMB_C34; \
+ type FMT_TRUNCATE_EN; \
+ type FMT_TRUNCATE_DEPTH; \
+ type FMT_TRUNCATE_MODE; \
+ type FMT_SPATIAL_DITHER_EN; \
+ type FMT_SPATIAL_DITHER_MODE; \
+ type FMT_SPATIAL_DITHER_DEPTH; \
+ type FMT_TEMPORAL_DITHER_EN; \
+ type FMT_HIGHPASS_RANDOM_ENABLE; \
+ type FMT_FRAME_RANDOM_ENABLE; \
+ type FMT_RGB_RANDOM_ENABLE; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX; \
+ type FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP; \
+ type FMT_RAND_R_SEED; \
+ type FMT_RAND_G_SEED; \
+ type FMT_RAND_B_SEED; \
+ type FMT_PIXEL_ENCODING; \
+ type FMT_CLAMP_DATA_EN; \
+ type FMT_CLAMP_COLOR_FORMAT; \
+ type FMT_DYNAMIC_EXP_EN; \
+ type FMT_DYNAMIC_EXP_MODE; \
+ type FMT_MAP420MEM_PWR_FORCE; \
+ type FMT_STEREOSYNC_OVERRIDE
+
+struct dcn10_opp_shift {
+ OPP_DCN10_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn10_opp_mask {
+ OPP_DCN10_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn10_opp_registers {
+ uint32_t DPG_CONTROL;
+ uint32_t DPG_COLOUR_B_CB;
+ uint32_t DPG_COLOUR_G_Y;
+ uint32_t DPG_COLOUR_R_CR;
+ uint32_t CM_OCSC_C11_C12;
+ uint32_t CM_OCSC_C13_C14;
+ uint32_t CM_OCSC_C21_C22;
+ uint32_t CM_OCSC_C23_C24;
+ uint32_t CM_OCSC_C31_C32;
+ uint32_t CM_OCSC_C33_C34;
+ uint32_t CM_COMB_C11_C12;
+ uint32_t CM_COMB_C13_C14;
+ uint32_t CM_COMB_C21_C22;
+ uint32_t CM_COMB_C23_C24;
+ uint32_t CM_COMB_C31_C32;
+ uint32_t CM_COMB_C33_C34;
+ uint32_t FMT_BIT_DEPTH_CONTROL;
+ uint32_t FMT_CONTROL;
+ uint32_t FMT_DITHER_RAND_R_SEED;
+ uint32_t FMT_DITHER_RAND_G_SEED;
+ uint32_t FMT_DITHER_RAND_B_SEED;
+ uint32_t FMT_CLAMP_CNTL;
+ uint32_t FMT_DYNAMIC_EXP_CNTL;
+ uint32_t FMT_MAP420_MEMORY_CONTROL;
+};
+
+struct dcn10_opp {
+ struct output_pixel_processor base;
+
+ const struct dcn10_opp_registers *regs;
+ const struct dcn10_opp_shift *opp_shift;
+ const struct dcn10_opp_mask *opp_mask;
+
+ bool is_write_to_ram_a_safe;
+};
+
+void dcn10_opp_construct(struct dcn10_opp *oppn10,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn10_opp_registers *regs,
+ const struct dcn10_opp_shift *opp_shift,
+ const struct dcn10_opp_mask *opp_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
new file mode 100644
index 000000000000..9fc8f827f2a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -0,0 +1,1468 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn10/dcn10_resource.h"
+
+#include "dcn10/dcn10_ipp.h"
+#include "dcn10/dcn10_mpc.h"
+#include "irq/dcn10/irq_service_dcn10.h"
+#include "dcn10/dcn10_dpp.h"
+#include "dcn10/dcn10_timing_generator.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_opp.h"
+#include "dce/dce_link_encoder.h"
+#include "dce/dce_stream_encoder.h"
+#include "dce/dce_clocks.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "../virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dce112/dce112_resource.h"
+#include "dcn10_hubp.h"
+
+#include "vega10/soc15ip.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+
+#include "raven1/NBIO/nbio_7_0_offset.h"
+
+#include "raven1/MMHUB/mmhub_9_1_offset.h"
+#include "raven1/MMHUB/mmhub_9_1_sh_mask.h"
+
+#include "reg_helper.h"
+#include "dce/dce_abm.h"
+#include "dce/dce_dmcu.h"
+
+#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+ #define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f
+ #define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f
+ #define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f
+ #define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f
+ #define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL 0x260f
+ #define mmDP5_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL 0x270f
+ #define mmDP6_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2
+#endif
+
+
+enum dcn10_clk_src_array_id {
+ DCN10_CLK_SRC_PLL0,
+ DCN10_CLK_SRC_PLL1,
+ DCN10_CLK_SRC_PLL2,
+ DCN10_CLK_SRC_PLL3,
+ DCN10_CLK_SRC_TOTAL
+};
+
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header files */
+
+/* DCN */
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
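+
+/* e.g. SRI(FMT_CONTROL, FMT, 0) expands to
+ *	.FMT_CONTROL = BASE(mmFMT0_FMT_CONTROL_BASE_IDX) + mmFMT0_FMT_CONTROL
+ */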
+
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+ NBIF_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+ MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+ MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+ .reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* macros to expand the register list macros defined in the HW object header files
+ * end *********************/
+
+
+static const struct dce_dmcu_registers dmcu_regs = {
+ DMCU_DCN10_REG_LIST()
+};
+
+static const struct dce_dmcu_shift dmcu_shift = {
+ DMCU_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_dmcu_mask dmcu_mask = {
+ DMCU_MASK_SH_LIST_DCN10(_MASK)
+};
+
+static const struct dce_abm_registers abm_regs = {
+ ABM_DCN10_REG_LIST(0)
+};
+
+static const struct dce_abm_shift abm_shift = {
+ ABM_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_abm_mask abm_mask = {
+ ABM_MASK_SH_LIST_DCN10(_MASK)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_DCN_REG_LIST(id),\
+ .TMDS_CNTL = 0,\
+ .AFMT_AVI_INFO0 = 0,\
+ .AFMT_AVI_INFO1 = 0,\
+ .AFMT_AVI_INFO2 = 0,\
+ .AFMT_AVI_INFO3 = 0,\
+}
+
+static const struct dce110_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1),
+ stream_enc_regs(2),
+ stream_enc_regs(3),
+};
+
+static const struct dce_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dce_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN10(_MASK),
+ .AFMT_GENERIC0_UPDATE = 0,
+ .AFMT_GENERIC2_UPDATE = 0,
+ .DP_DYN_RANGE = 0,
+ .DP_YCBCR_RANGE = 0,
+ .HDMI_AVI_INFO_SEND = 0,
+ .HDMI_AVI_INFO_CONT = 0,
+ .HDMI_AVI_INFO_LINE = 0,
+ .DP_SEC_AVI_ENABLE = 0,
+ .AFMT_AVI_INFO_VERSION = 0
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+ audio_regs(2),
+ audio_regs(3),
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_aduio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+#define link_regs(id)\
+[id] = {\
+ LE_DCN10_REG_LIST(id), \
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \
+}
+
+static const struct dce110_link_enc_registers link_enc_regs[] = {
+ link_regs(0),
+ link_regs(1),
+ link_regs(2),
+ link_regs(3),
+ link_regs(4),
+ link_regs(5),
+ link_regs(6),
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_REG_LIST_DCN10(id),\
+}
+
+static const struct dcn10_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+};
+
+static const struct dcn10_ipp_shift ipp_shift = {
+ IPP_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn10_ipp_mask ipp_mask = {
+ IPP_MASK_SH_LIST_DCN10(_MASK),
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_REG_LIST_DCN10(id),\
+}
+
+static const struct dcn10_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+ opp_regs(2),
+ opp_regs(3),
+};
+
+static const struct dcn10_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn10_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN10(_MASK),
+};
+
+#define tf_regs(id)\
+[id] = {\
+ TF_REG_LIST_DCN10(id),\
+}
+
+static const struct dcn_dpp_registers tf_regs[] = {
+ tf_regs(0),
+ tf_regs(1),
+ tf_regs(2),
+ tf_regs(3),
+};
+
+static const struct dcn_dpp_shift tf_shift = {
+ TF_REG_LIST_SH_MASK_DCN10(__SHIFT)
+};
+
+static const struct dcn_dpp_mask tf_mask = {
+ TF_REG_LIST_SH_MASK_DCN10(_MASK),
+};
+
+static const struct dcn_mpc_registers mpc_regs = {
+ MPC_COMMON_REG_LIST_DCN1_0(0),
+ MPC_COMMON_REG_LIST_DCN1_0(1),
+ MPC_COMMON_REG_LIST_DCN1_0(2),
+ MPC_COMMON_REG_LIST_DCN1_0(3),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(0),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(1),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(2),
+ MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(3)
+};
+
+static const struct dcn_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+};
+
+static const struct dcn_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+};
+
+#define tg_regs(id)\
+[id] = {TG_COMMON_REG_LIST_DCN1_0(id)}
+
+static const struct dcn_tg_registers tg_regs[] = {
+ tg_regs(0),
+ tg_regs(1),
+ tg_regs(2),
+ tg_regs(3),
+};
+
+static const struct dcn_tg_shift tg_shift = {
+ TG_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+};
+
+static const struct dcn_tg_mask tg_mask = {
+ TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
+};
+
+
+static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_6)
+};
+
+#define mi_regs(id)\
+[id] = {\
+ MI_REG_LIST_DCN10(id)\
+}
+
+
+static const struct dcn_mi_registers mi_regs[] = {
+ mi_regs(0),
+ mi_regs(1),
+ mi_regs(2),
+ mi_regs(3),
+};
+
+static const struct dcn_mi_shift mi_shift = {
+ MI_MASK_SH_LIST_DCN10(__SHIFT)
+};
+
+static const struct dcn_mi_mask mi_mask = {
+ MI_MASK_SH_LIST_DCN10(_MASK)
+};
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B),
+ clk_src_regs(2, C),
+ clk_src_regs(3, D)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
+};
+
+
+static const struct resource_caps res_cap = {
+ .num_timing_generator = 4,
+ .num_video_plane = 4,
+ .num_audio = 4,
+ .num_stream_encoder = 4,
+ .num_pll = 4,
+};
+
+static const struct dc_debug debug_defaults_drv = {
+ .sanity_checks = true,
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+
+ .min_disp_clk_khz = 300000,
+
+ .disable_pplib_clock_request = true,
+ .disable_pplib_wm_range = false,
+ .pplib_wm_report_mode = WM_REPORT_DEFAULT,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .force_single_disp_pipe_split = true,
+ .disable_dcc = DCC_ENABLE,
+ .voltage_align_fclk = true,
+ .disable_stereo_support = true,
+ .vsr_support = true,
+ .performance_trace = false,
+};
+
+static const struct dc_debug debug_defaults_diags = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = true,
+ .clock_trace = true,
+ .disable_stutter = true,
+ .disable_pplib_clock_request = true,
+ .disable_pplib_wm_range = true
+};
+
+static void dcn10_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN10_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn10_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn10_dpp *dpp =
+ kzalloc(sizeof(struct dcn10_dpp), GFP_KERNEL);
+
+ if (!dpp)
+ return NULL;
+
+ dpp1_construct(dpp, ctx, inst,
+ &tf_regs[inst], &tf_shift, &tf_mask);
+ return &dpp->base;
+}
+
+static struct input_pixel_processor *dcn10_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_ipp *ipp =
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
+
+ if (!ipp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn10_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+
+static struct output_pixel_processor *dcn10_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_opp *opp =
+ kzalloc(sizeof(struct dcn10_opp), GFP_KERNEL);
+
+ if (!opp) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn10_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
+{
+ struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
+ GFP_KERNEL);
+
+ if (!mpc10)
+ return NULL;
+
+ dcn10_mpc_construct(mpc10, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ 4);
+
+ return &mpc10->base;
+}
+
+static struct timing_generator *dcn10_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct dcn10_timing_generator *tgn10 =
+ kzalloc(sizeof(struct dcn10_timing_generator), GFP_KERNEL);
+
+ if (!tgn10)
+ return NULL;
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &tg_regs[instance];
+ tgn10->tg_shift = &tg_shift;
+ tgn10->tg_mask = &tg_mask;
+
+ dcn10_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .ycbcr420_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true,
+ .flags.bits.IS_YCBCR_CAPABLE = true
+};
+
+struct link_encoder *dcn10_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dce110_link_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source]);
+
+ return &enc110->base;
+}
+
+struct clock_source *dcn10_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce110_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+}
+
+static struct audio *create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct stream_encoder *dcn10_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dce110_stream_encoder *enc110 =
+ kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL);
+
+ if (!enc110)
+ return NULL;
+
+ dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+ return &enc110->base;
+}
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCN1_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN1_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN1_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dcn10_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = create_audio,
+ .create_stream_encoder = dcn10_stream_encoder_create,
+ .create_hwseq = dcn10_hwseq_create,
+};
+
+static const struct resource_create_funcs res_create_maximus_funcs = {
+ .read_dce_straps = NULL,
+ .create_audio = NULL,
+ .create_stream_encoder = NULL,
+ .create_hwseq = dcn10_hwseq_create,
+};
+
+void dcn10_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
+{
+ struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+
+ if (!pp_smu)
+ return pp_smu;
+
+ dm_pp_get_funcs_rv(ctx, pp_smu);
+ return pp_smu;
+}
+
+static void destruct(struct dcn10_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ /* TODO: free dcn version of stream encoder once implemented
+ * rather than using virtual stream encoder
+ */
+ kfree(pool->base.stream_enc[i]);
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN10_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+
+ if (pool->base.dpps[i] != NULL)
+ dcn10_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.stream_enc_count; i++)
+ kfree(pool->base.stream_enc[i]);
+
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn10_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn10_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ if (pool->base.abm != NULL)
+ dce_abm_destroy(&pool->base.abm);
+
+ if (pool->base.dmcu != NULL)
+ dce_dmcu_destroy(&pool->base.dmcu);
+
+ if (pool->base.display_clock != NULL)
+ dce_disp_clk_destroy(&pool->base.display_clock);
+
+ kfree(pool->base.pp_smu);
+}
+
+static struct hubp *dcn10_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn10_hubp *hubp1 =
+ kzalloc(sizeof(struct dcn10_hubp), GFP_KERNEL);
+
+ if (!hubp1)
+ return NULL;
+
+ dcn10_hubp_construct(hubp1, ctx, inst,
+ &mi_regs[inst], &mi_shift, &mi_mask);
+ return &hubp1->base;
+}
+
+static void get_pixel_clock_parameters(
+ const struct pipe_ctx *pipe_ctx,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ pixel_clk_params->requested_pix_clk = stream->timing.pix_clk_khz;
+ pixel_clk_params->encoder_object_id = stream->sink->link->link_enc->id;
+ pixel_clk_params->signal_type = pipe_ctx->stream->signal;
+ pixel_clk_params->controller_id = pipe_ctx->pipe_idx + 1;
+	/* TODO: un-hardcode */
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
+ pixel_clk_params->flags.ENABLE_SS = 0;
+ pixel_clk_params->color_depth =
+ stream->timing.display_color_depth;
+ pixel_clk_params->flags.DISPLAY_BLANKED = 1;
+ pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
+
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pixel_clk_params->color_depth = COLOR_DEPTH_888;
+
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pixel_clk_params->requested_pix_clk /= 2;
+
+}
+
+static void build_clamping_params(struct dc_stream_state *stream)
+{
+ stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
+ stream->clamping.c_depth = stream->timing.display_color_depth;
+ stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
+}
+
+static void build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
+{
+
+ get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);
+
+ pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
+ pipe_ctx->clock_source,
+ &pipe_ctx->stream_res.pix_clk_params,
+ &pipe_ctx->pll_settings);
+
+ pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;
+
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ build_clamping_params(pipe_ctx->stream);
+}
+
+static enum dc_status build_mapped_resource(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+	/* TODO: Seems unneeded anymore */
+ /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
+ if (stream != NULL && old_context->streams[i] != NULL) {
+ todo: shouldn't have to copy missing parameter here
+ resource_build_bit_depth_reduction_params(stream,
+ &stream->bit_depth_params);
+ stream->clamping.pixel_encoding =
+ stream->timing.pixel_encoding;
+
+ resource_build_bit_depth_reduction_params(stream,
+ &stream->bit_depth_params);
+ build_clamping_params(stream);
+
+ continue;
+ }
+ }
+ */
+
+ if (!pipe_ctx)
+ return DC_ERROR_UNEXPECTED;
+
+ build_pipe_hw_param(pipe_ctx);
+ return DC_OK;
+}
+
+enum dc_status dcn10_add_stream_to_ctx(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ result = resource_map_pool_resources(dc, new_ctx, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, new_ctx, dc_stream);
+
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, new_ctx, dc_stream);
+
+ return result;
+}
+
+enum dc_status dcn10_validate_guaranteed(
+ struct dc *dc,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context)
+{
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+
+ context->streams[0] = dc_stream;
+ dc_stream_retain(context->streams[0]);
+ context->stream_count++;
+
+ result = resource_map_pool_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = resource_map_phy_clock_resources(dc, context, dc_stream);
+
+ if (result == DC_OK)
+ result = build_mapped_resource(dc, context, dc_stream);
+
+ if (result == DC_OK) {
+ validate_guaranteed_copy_streams(
+ context, dc->caps.max_streams);
+ result = resource_build_scaling_params_for_context(dc, context);
+ }
+ if (result == DC_OK && !dcn_validate_bandwidth(dc, context))
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+
+ return result;
+}
+
+static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+ struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
+
+ if (!head_pipe) {
+ ASSERT(0);
+ return NULL;
+ }
+
+ if (!idle_pipe)
+ return NULL;
+
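+	/* The acquired pipe shares the head pipe's stream-level resources
+	 * (stream, TG, OPP) and takes its own per-pipe plane resources
+	 * (HUBP, IPP, DPP), as assigned below.
+	 */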
+ idle_pipe->stream = head_pipe->stream;
+ idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+
+ idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+
+ return idle_pipe;
+}
+
+enum dcc_control {
+ dcc_control__256_256_xxx,
+ dcc_control__128_128_xxx,
+ dcc_control__256_64_64,
+};
+
+enum segment_order {
+ segment_order__na,
+ segment_order__contiguous,
+ segment_order__non_contiguous,
+};
+
+static bool dcc_support_pixel_format(
+ enum surface_pixel_format format,
+ unsigned int *bytes_per_element)
+{
+ /* DML: get_bytes_per_element */
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ *bytes_per_element = 2;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ *bytes_per_element = 4;
+ return true;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ *bytes_per_element = 8;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool dcc_support_swizzle(
+ enum swizzle_mode_values swizzle,
+ unsigned int bytes_per_element,
+ enum segment_order *segment_order_horz,
+ enum segment_order *segment_order_vert)
+{
+ bool standard_swizzle = false;
+ bool display_swizzle = false;
+
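+	/* Classify the swizzle as standard (_S) or display (_D), then map
+	 * bytes-per-element to the horizontal/vertical segment ordering.
+	 */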
+ switch (swizzle) {
+ case DC_SW_4KB_S:
+ case DC_SW_64KB_S:
+ case DC_SW_VAR_S:
+ case DC_SW_4KB_S_X:
+ case DC_SW_64KB_S_X:
+ case DC_SW_VAR_S_X:
+ standard_swizzle = true;
+ break;
+ case DC_SW_4KB_D:
+ case DC_SW_64KB_D:
+ case DC_SW_VAR_D:
+ case DC_SW_4KB_D_X:
+ case DC_SW_64KB_D_X:
+ case DC_SW_VAR_D_X:
+ display_swizzle = true;
+ break;
+ default:
+ break;
+ }
+
+ if (bytes_per_element == 1 && standard_swizzle) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__na;
+ return true;
+ }
+ if (bytes_per_element == 2 && standard_swizzle) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 4 && standard_swizzle) {
+ *segment_order_horz = segment_order__non_contiguous;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8 && standard_swizzle) {
+ *segment_order_horz = segment_order__na;
+ *segment_order_vert = segment_order__contiguous;
+ return true;
+ }
+ if (bytes_per_element == 8 && display_swizzle) {
+ *segment_order_horz = segment_order__contiguous;
+ *segment_order_vert = segment_order__non_contiguous;
+ return true;
+ }
+
+ return false;
+}
+
+static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+	/* Copied from DML; might want to refactor to leverage DML directly. */
+	/* DML: get_blk256_size */
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
+
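+/*
+ * Decide between full 256B and half 128B DET requests: if two swaths
+ * (horizontal or vertical) fit in the 164KB detile buffer, full 256B
+ * requests are used; otherwise fall back to half 128B requests.
+ */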
+static void det_request_size(
+ unsigned int height,
+ unsigned int width,
+ unsigned int bpe,
+ bool *req128_horz_wc,
+ bool *req128_vert_wc)
+{
+ unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */
+
+ unsigned int blk256_height = 0;
+ unsigned int blk256_width = 0;
+ unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc;
+
+ get_blk256_size(&blk256_width, &blk256_height, bpe);
+
+ swath_bytes_horz_wc = height * blk256_height * bpe;
+ swath_bytes_vert_wc = width * blk256_width * bpe;
+
+ *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+
+ *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ?
+ false : /* full 256B request */
+ true; /* half 128b request */
+}
+
+static bool get_dcc_compression_cap(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */
+ enum dcc_control dcc_control;
+ unsigned int bpe;
+ enum segment_order segment_order_horz, segment_order_vert;
+ bool req128_horz_wc, req128_vert_wc;
+
+ memset(output, 0, sizeof(*output));
+
+ if (dc->debug.disable_dcc == DCC_DISABLE)
+ return false;
+
+ if (!dcc_support_pixel_format(input->format,
+ &bpe))
+ return false;
+
+ if (!dcc_support_swizzle(input->swizzle_mode, bpe,
+ &segment_order_horz, &segment_order_vert))
+ return false;
+
+ det_request_size(input->surface_size.height, input->surface_size.width,
+ bpe, &req128_horz_wc, &req128_vert_wc);
+
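+	/* Pick the DCC control: prefer full 256B requests; when a half 128B
+	 * request is needed in the scan direction, choose between 128/128 and
+	 * 256/64/64 based on whether the segments are contiguous in that
+	 * direction.
+	 */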
+ if (!req128_horz_wc && !req128_vert_wc) {
+ dcc_control = dcc_control__256_256_xxx;
+ } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) {
+ if (!req128_horz_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_horz == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else if (input->scan == SCAN_DIRECTION_VERTICAL) {
+ if (!req128_vert_wc)
+ dcc_control = dcc_control__256_256_xxx;
+ else if (segment_order_vert == segment_order__contiguous)
+ dcc_control = dcc_control__128_128_xxx;
+ else
+ dcc_control = dcc_control__256_64_64;
+ } else {
+ if ((req128_horz_wc &&
+ segment_order_horz == segment_order__non_contiguous) ||
+ (req128_vert_wc &&
+ segment_order_vert == segment_order__non_contiguous))
+ /* access_dir not known, must use most constraining */
+ dcc_control = dcc_control__256_64_64;
+ else
+			/* req128 is true for either horz or vert,
+			 * but segment_order is contiguous
+			 */
+ dcc_control = dcc_control__128_128_xxx;
+ }
+
+ if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE &&
+ dcc_control != dcc_control__256_256_xxx)
+ return false;
+
+ switch (dcc_control) {
+ case dcc_control__256_256_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 256;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__128_128_xxx:
+ output->grph.rgb.max_uncompressed_blk_size = 128;
+ output->grph.rgb.max_compressed_blk_size = 128;
+ output->grph.rgb.independent_64b_blks = false;
+ break;
+ case dcc_control__256_64_64:
+ output->grph.rgb.max_uncompressed_blk_size = 256;
+ output->grph.rgb.max_compressed_blk_size = 64;
+ output->grph.rgb.independent_64b_blks = true;
+ break;
+ }
+
+ output->capable = true;
+ output->const_color_support = false;
+
+ return true;
+}
+
+
+static void dcn10_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool);
+
+ destruct(dcn10_pool);
+ kfree(dcn10_pool);
+ *pool = NULL;
+}
+
+static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps)
+{
+ if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
+ && caps->max_video_width != 0
+ && plane_state->src_rect.width > caps->max_video_width)
+ return DC_FAIL_SURFACE_VALIDATE;
+
+ return DC_OK;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = get_dcc_compression_cap
+};
+
+static struct resource_funcs dcn10_res_pool_funcs = {
+ .destroy = dcn10_destroy_resource_pool,
+ .link_enc_create = dcn10_link_encoder_create,
+ .validate_guaranteed = dcn10_validate_guaranteed,
+ .validate_bandwidth = dcn_validate_bandwidth,
+ .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
+ .validate_plane = dcn10_validate_plane,
+ .add_stream_to_ctx = dcn10_add_stream_to_ctx
+};
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+ uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0);
+	/* RV1 supports a max of 4 pipes */
+ value = value & 0xf;
+ return value;
+}
+
+static bool construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn10_resource_pool *pool)
+{
+ int i;
+ int j;
+ struct dc_context *ctx = dc->ctx;
+ uint32_t pipe_fuses = read_pipe_fuses(ctx);
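+	/* Each bit set in pipe_fuses marks a fused-off pipe; the resource
+	 * create loop below skips the corresponding ASIC register instance.
+	 */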
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap;
+ pool->base.funcs = &dcn10_res_pool_funcs;
+
+	/*
+	 * TODO: fill in from the actual Raven resource caps once we create
+	 * more than the virtual encoder
+	 */
+
+ /*************************************************
+	 *  Resource + asic cap hardcoding            *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+	/* max pipe num for this ASIC before checking pipe fuses */
+ pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+
+ dc->caps.max_video_width = 3840;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.max_cursor_size = 256;
+
+ dc->caps.max_slave_planes = 1;
+
+ if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
+ dc->debug = debug_defaults_drv;
+ else
+ dc->debug = debug_defaults_diags;
+
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL0] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL1] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL2] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
+ pool->base.clock_sources[DCN10_CLK_SRC_PLL3] =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+
+ pool->base.clk_src_count = DCN10_CLK_SRC_TOTAL;
+
+ pool->base.dp_clock_source =
+ dcn10_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+			/* todo: do not reuse phy_pll registers */
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ BREAK_TO_DEBUGGER();
+ goto clock_source_create_fail;
+ }
+ }
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pool->base.display_clock = dce120_disp_clk_create(ctx);
+ if (pool->base.display_clock == NULL) {
+ dm_error("DC: failed to create display clock!\n");
+ BREAK_TO_DEBUGGER();
+ goto disp_clk_create_fail;
+ }
+ }
+
+ pool->base.dmcu = dcn10_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ pool->base.abm = dce_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ if (pool->base.abm == NULL) {
+ dm_error("DC: failed to create abm!\n");
+ BREAK_TO_DEBUGGER();
+ goto res_create_fail;
+ }
+
+ dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
+ memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
+ memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
+
+ if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->dcn_soc->urgent_latency = 3;
+ dc->debug.disable_dmcu = true;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 41.60f;
+ }
+
+
+ dc->dcn_soc->number_of_channels = dc->ctx->asic_id.vram_width / ddr4_dram_width;
+ ASSERT(dc->dcn_soc->number_of_channels < 3);
+	if (dc->dcn_soc->number_of_channels == 0) /* old sbios bug */
+ dc->dcn_soc->number_of_channels = 2;
+
+ if (dc->dcn_soc->number_of_channels == 1) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 19.2f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 17.066f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 14.933f;
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 12.8f;
+ if (ASICREV_IS_RV1_F0(dc->ctx->asic_id.hw_internal_rev)) {
+ dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 20.80f;
+ }
+ }
+
+ pool->base.pp_smu = dcn10_pp_smu_create(ctx);
+
+ if (!dc->debug.disable_pplib_clock_request)
+ dcn_bw_update_from_pplib(dc);
+ dcn_bw_sync_calcs_and_dml(dc);
+ if (!dc->debug.disable_pplib_wm_range) {
+ dc->res_pool = &pool->base;
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+ }
+
+ {
+ #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn10_create(&init_data);
+ if (!pool->base.irqs)
+ goto irqs_create_fail;
+ #endif
+ }
+
+ /* index to valid pipe resource */
+ j = 0;
+ /* mem input -> ipp -> dpp -> opp -> TG */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+		/* if pipe is disabled, skip instance of HW pipe,
+		 * i.e., skip ASIC register instance
+		 */
+ if ((pipe_fuses & (1 << i)) != 0)
+ continue;
+
+ pool->base.hubps[j] = dcn10_hubp_create(ctx, i);
+ if (pool->base.hubps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto mi_create_fail;
+ }
+
+ pool->base.ipps[j] = dcn10_ipp_create(ctx, i);
+ if (pool->base.ipps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto ipp_create_fail;
+ }
+
+ pool->base.dpps[j] = dcn10_dpp_create(ctx, i);
+ if (pool->base.dpps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create dpp!\n");
+ goto dpp_create_fail;
+ }
+
+ pool->base.opps[j] = dcn10_opp_create(ctx, i);
+ if (pool->base.opps[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto opp_create_fail;
+ }
+
+ pool->base.timing_generators[j] = dcn10_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[j] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create tg!\n");
+ goto otg_create_fail;
+ }
+		/* advance to the next valid pipe slot */
+ j++;
+ }
+
+ /* valid pipe num */
+ pool->base.pipe_count = j;
+
+	/* Within the DML lib this is hard-coded to 4. If ASIC pipes are
+	 * fused, the value may change.
+	 */
+ dc->dml.ip.max_num_dpp = pool->base.pipe_count;
+ dc->dcn_ip->max_num_dpp = pool->base.pipe_count;
+
+ pool->base.mpc = dcn10_mpc_create(ctx);
+ if (pool->base.mpc == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error("DC: failed to create mpc!\n");
+ goto mpc_create_fail;
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
+ &res_create_funcs : &res_create_maximus_funcs)))
+ goto res_create_fail;
+
+ dcn10_hw_sequencer_construct(dc);
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ dc->cap_funcs = cap_funcs;
+
+ return true;
+
+disp_clk_create_fail:
+mpc_create_fail:
+otg_create_fail:
+opp_create_fail:
+dpp_create_fail:
+ipp_create_fail:
+mi_create_fail:
+irqs_create_fail:
+res_create_fail:
+clock_source_create_fail:
+
+ destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn10_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc)
+{
+ struct dcn10_resource_pool *pool =
+ kzalloc(sizeof(struct dcn10_resource_pool), GFP_KERNEL);
+
+ if (!pool)
+ return NULL;
+
+ if (construct(num_virtual_links, dc, pool))
+ return &pool->base;
+
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
new file mode 100644
index 000000000000..8f71225bc61b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCN10_H__
+#define __DC_RESOURCE_DCN10_H__
+
+#include "core_types.h"
+
+#define TO_DCN10_RES_POOL(pool)\
+ container_of(pool, struct dcn10_resource_pool, base)
+
+struct dc;
+struct resource_pool;
+struct _vcs_dpi_display_pipe_params_st;
+
+struct dcn10_resource_pool {
+ struct resource_pool base;
+};
+struct resource_pool *dcn10_create_resource_pool(
+ uint8_t num_virtual_links,
+ struct dc *dc);
+
+
+#endif /* __DC_RESOURCE_DCN10_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
new file mode 100644
index 000000000000..fced178c8c79
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
@@ -0,0 +1,1200 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn10_timing_generator.h"
+#include "dc.h"
+
+#define REG(reg)\
+ tgn10->tg_regs->reg
+
+#define CTX \
+ tgn10->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ tgn10->tg_shift->field_name, tgn10->tg_mask->field_name
+
+#define STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN 0x100
+
+/**
+ * apply_front_porch_workaround  TODO FPGA still need?
+ *
+ * This is a workaround for a bug that has existed since R5xx and has not been
+ * fixed: keep the front porch at a minimum of 2 for interlaced mode or 1 for
+ * progressive.
+ */
+static void tgn10_apply_front_porch_workaround(
+ struct timing_generator *tg,
+ struct dc_crtc_timing *timing)
+{
+ if (timing->flags.INTERLACE == 1) {
+ if (timing->v_front_porch < 2)
+ timing->v_front_porch = 2;
+ } else {
+ if (timing->v_front_porch < 1)
+ timing->v_front_porch = 1;
+ }
+}
+
+static void tgn10_program_global_sync(
+ struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ if (tg->dlg_otg_param.vstartup_start == 0) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ REG_SET(OTG_VSTARTUP_PARAM, 0,
+ VSTARTUP_START, tg->dlg_otg_param.vstartup_start);
+
+ REG_SET_2(OTG_VUPDATE_PARAM, 0,
+ VUPDATE_OFFSET, tg->dlg_otg_param.vupdate_offset,
+ VUPDATE_WIDTH, tg->dlg_otg_param.vupdate_width);
+
+ REG_SET(OTG_VREADY_PARAM, 0,
+ VREADY_OFFSET, tg->dlg_otg_param.vready_offset);
+}
+
+static void tgn10_disable_stereo(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET(OTG_STEREO_CONTROL, 0,
+ OTG_STEREO_EN, 0);
+
+ REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0,
+ OTG_3D_STRUCTURE_EN, 0,
+ OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
+ OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
+
+ REG_UPDATE(OPPBUF_CONTROL,
+ OPPBUF_ACTIVE_WIDTH, 0);
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+ OPPBUF_3D_VACT_SPACE1_SIZE, 0);
+}
+
+/**
+ * program_timing_generator, used by mode timing set.
+ * Programs the CRTC timing registers - OTG_H_*, OTG_V_*, pixel repetition -
+ * including SYNC. Calls the BIOS command table to program timings.
+ */
+static void tgn10_program_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *dc_crtc_timing,
+ bool use_vbios)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t vesa_sync_start;
+ uint32_t asic_blank_end;
+ uint32_t asic_blank_start;
+ uint32_t v_total;
+ uint32_t v_sync_end;
+ uint32_t v_init, v_fp2;
+ uint32_t h_sync_polarity, v_sync_polarity;
+ uint32_t interlace_factor;
+ uint32_t start_point = 0;
+ uint32_t field_num = 0;
+ uint32_t h_div_2;
+ int32_t vertical_line_start;
+
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ patched_crtc_timing = *dc_crtc_timing;
+ tgn10_apply_front_porch_workaround(tg, &patched_crtc_timing);
+
+ /* Load horizontal timing */
+
+ /* CRTC_H_TOTAL = vesa.h_total - 1 */
+ REG_SET(OTG_H_TOTAL, 0,
+ OTG_H_TOTAL, patched_crtc_timing.h_total - 1);
+
+ /* h_sync_start = 0, h_sync_end = vesa.h_sync_width */
+ REG_UPDATE_2(OTG_H_SYNC_A,
+ OTG_H_SYNC_A_START, 0,
+ OTG_H_SYNC_A_END, patched_crtc_timing.h_sync_width);
+
+ /* asic_h_blank_end = HsyncWidth + HbackPorch =
+	 * vesa.usHorizontalTotal - vesa.usHorizontalSyncStart -
+ * vesa.h_left_border
+ */
+ vesa_sync_start = patched_crtc_timing.h_addressable +
+ patched_crtc_timing.h_border_right +
+ patched_crtc_timing.h_front_porch;
+
+ asic_blank_end = patched_crtc_timing.h_total -
+ vesa_sync_start -
+ patched_crtc_timing.h_border_left;
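+	/* e.g. for a CEA 1920x1080@60 timing (h_total 2200, h_front_porch 88,
+	 * h_sync_width 44, no borders): vesa_sync_start = 1920 + 0 + 88 = 2008
+	 * and asic_blank_end = 2200 - 2008 - 0 = 192 = hsync + back porch.
+	 */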
+
+	/* h_blank_start = h_blank_end + h_active */
+ asic_blank_start = asic_blank_end +
+ patched_crtc_timing.h_border_left +
+ patched_crtc_timing.h_addressable +
+ patched_crtc_timing.h_border_right;
+
+ REG_UPDATE_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, asic_blank_start,
+ OTG_H_BLANK_END, asic_blank_end);
+
+ /* h_sync polarity */
+ h_sync_polarity = patched_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ?
+ 0 : 1;
+
+ REG_UPDATE(OTG_H_SYNC_A_CNTL,
+ OTG_H_SYNC_A_POL, h_sync_polarity);
+
+ /* Load vertical timing */
+
+ /* CRTC_V_TOTAL = v_total - 1 */
+ if (patched_crtc_timing.flags.INTERLACE) {
+ interlace_factor = 2;
+ v_total = 2 * patched_crtc_timing.v_total;
+ } else {
+ interlace_factor = 1;
+ v_total = patched_crtc_timing.v_total - 1;
+ }
+ REG_SET(OTG_V_TOTAL, 0,
+ OTG_V_TOTAL, v_total);
+
+	/* In case V_TOTAL_CONTROL is on, make sure OTG_V_TOTAL_MAX and
+ * OTG_V_TOTAL_MIN are equal to V_TOTAL.
+ */
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, v_total);
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, v_total);
+
+ /* v_sync_start = 0, v_sync_end = v_sync_width */
+ v_sync_end = patched_crtc_timing.v_sync_width * interlace_factor;
+
+ REG_UPDATE_2(OTG_V_SYNC_A,
+ OTG_V_SYNC_A_START, 0,
+ OTG_V_SYNC_A_END, v_sync_end);
+
+ vesa_sync_start = patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom +
+ patched_crtc_timing.v_front_porch;
+
+ asic_blank_end = (patched_crtc_timing.v_total -
+ vesa_sync_start -
+ patched_crtc_timing.v_border_top)
+ * interlace_factor;
+
+ /* v_blank_start = v_blank_end + v_active */
+ asic_blank_start = asic_blank_end +
+ (patched_crtc_timing.v_border_top +
+ patched_crtc_timing.v_addressable +
+ patched_crtc_timing.v_border_bottom)
+ * interlace_factor;
+
+ REG_UPDATE_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, asic_blank_start,
+ OTG_V_BLANK_END, asic_blank_end);
+
+	/* Use OTG_VERTICAL_INTERRUPT2 to replace the VUPDATE interrupt;
+	 * program the reg for the interrupt position.
+	 */
+ vertical_line_start = asic_blank_end - tg->dlg_otg_param.vstartup_start + 1;
+ if (vertical_line_start < 0) {
+ ASSERT(0);
+ vertical_line_start = 0;
+ }
+ REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
+ OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
+
+ /* v_sync polarity */
+ v_sync_polarity = patched_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ?
+ 0 : 1;
+
+ REG_UPDATE(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, v_sync_polarity);
+
+ v_init = asic_blank_start;
+ if (tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ tg->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ tg->dlg_otg_param.signal == SIGNAL_TYPE_EDP) {
+ start_point = 1;
+ if (patched_crtc_timing.flags.INTERLACE == 1)
+ field_num = 1;
+ }
+ v_fp2 = 0;
+ if (tg->dlg_otg_param.vstartup_start > asic_blank_end)
+		v_fp2 = tg->dlg_otg_param.vstartup_start - asic_blank_end;
+
+ /* Interlace */
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ REG_UPDATE(OTG_INTERLACE_CONTROL,
+ OTG_INTERLACE_ENABLE, 1);
+ v_init = v_init / 2;
+ if ((tg->dlg_otg_param.vstartup_start/2)*2 > asic_blank_end)
+ v_fp2 = v_fp2 / 2;
+ }
+ else
+ REG_UPDATE(OTG_INTERLACE_CONTROL,
+ OTG_INTERLACE_ENABLE, 0);
+
+
+ /* VTG enable set to 0 first VInit */
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 0);
+
+ REG_UPDATE_2(CONTROL,
+ VTG0_FP2, v_fp2,
+ VTG0_VCOUNT_INIT, v_init);
+
+ /* original code is using VTG offset to address OTG reg, seems wrong */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_START_POINT_CNTL, start_point,
+ OTG_FIELD_NUMBER_CNTL, field_num);
+
+ tgn10_program_global_sync(tg);
+
+ /* TODO
+ * patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
+ * program_horz_count_by_2
+ * for DVI 30bpp mode, 0 otherwise
+ * program_horz_count_by_2(tg, &patched_crtc_timing);
+ */
+
+	/* Enable stereo - only when we need to pack the 3D frame. Other types
+	 * of stereo are handled in an explicit call.
+	 */
+ h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ?
+ 1 : 0;
+
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_BY2, h_div_2);
+
+}
+
+/**
+ * unblank_crtc
+ * Call ASIC Control Object to UnBlank CRTC.
+ */
+static void tgn10_unblank_crtc(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t vertical_interrupt_enable = 0;
+
+ REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
+ OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable);
+
+	/* temporary workaround for the vertical interrupt; once the vertical
+	 * interrupt is enabled, this check will be removed.
+	 */
+ if (vertical_interrupt_enable)
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
+
+ REG_UPDATE_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 0,
+ OTG_BLANK_DE_MODE, 0);
+}
+
+/**
+ * blank_crtc
+ * Call ASIC Control Object to Blank CRTC.
+ */
+
+static void tgn10_blank_crtc(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_UPDATE_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 1,
+ OTG_BLANK_DE_MODE, 0);
+
+ /* todo: why are we waiting for BLANK_DATA_EN? shouldn't we be waiting
+ * for status?
+ */
+ REG_WAIT(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, 1,
+ 1, 100000);
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+}
+
+static void tgn10_set_blank(struct timing_generator *tg,
+ bool enable_blanking)
+{
+ if (enable_blanking)
+ tgn10_blank_crtc(tg);
+ else
+ tgn10_unblank_crtc(tg);
+}
+
+static bool tgn10_is_blanked(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t blank_en;
+ uint32_t blank_state;
+
+ REG_GET_2(OTG_BLANK_CONTROL,
+ OTG_BLANK_DATA_EN, &blank_en,
+ OTG_CURRENT_BLANK_STATE, &blank_state);
+
+ return blank_en && blank_state;
+}
+
+static void tgn10_enable_optc_clock(struct timing_generator *tg, bool enable)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ if (enable) {
+ REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_EN, 1,
+ OPTC_INPUT_CLK_GATE_DIS, 1);
+
+ REG_WAIT(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_ON, 1,
+ 1, 1000);
+
+ /* Enable clock */
+ REG_UPDATE_2(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_EN, 1,
+ OTG_CLOCK_GATE_DIS, 1);
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_ON, 1,
+ 1, 1000);
+ } else {
+ REG_UPDATE_2(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_GATE_DIS, 0,
+ OTG_CLOCK_EN, 0);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_CLOCK_ON, 0,
+ 1, 1000);
+
+ REG_UPDATE_2(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_GATE_DIS, 0,
+ OPTC_INPUT_CLK_EN, 0);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OPTC_INPUT_CLOCK_CONTROL,
+ OPTC_INPUT_CLK_ON, 0,
+ 1, 1000);
+ }
+}
+
+/**
+ * Enable CRTC
+ * Enable CRTC - call ASIC Control Object to enable Timing generator.
+ */
+static bool tgn10_enable_crtc(struct timing_generator *tg)
+{
+ /* TODO FPGA wait for answer
+ * OTG_MASTER_UPDATE_MODE != CRTC_MASTER_UPDATE_MODE
+ * OTG_MASTER_UPDATE_LOCK != CRTC_MASTER_UPDATE_LOCK
+ */
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+	/* opp instance for OTG. For DCN1.0, ODM is removed;
+	 * OPP and OPTC should have a 1:1 mapping.
+	 */
+ REG_UPDATE(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SRC_SEL, tg->inst);
+
+ /* VTG enable first is for HW workaround */
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 1);
+
+ /* Enable CRTC */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 3,
+ OTG_MASTER_EN, 1);
+
+ return true;
+}
+
+/* disable_crtc - call ASIC Control Object to disable Timing generator. */
+static bool tgn10_disable_crtc(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ /* disable otg request until end of the first line
+ * in the vertical blank region
+ */
+ REG_UPDATE_2(OTG_CONTROL,
+ OTG_DISABLE_POINT_CNTL, 3,
+ OTG_MASTER_EN, 0);
+
+ REG_UPDATE(CONTROL,
+ VTG0_ENABLE, 0);
+
+	/* CRTC disabled; wait for the OTG to go idle before its clock is disabled. */
+ REG_WAIT(OTG_CLOCK_CONTROL,
+ OTG_BUSY, 0,
+ 1, 100000);
+
+ return true;
+}
+
+
+static void tgn10_program_blank_color(
+ struct timing_generator *tg,
+ const struct tg_color *black_color)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET_3(OTG_BLACK_COLOR, 0,
+ OTG_BLACK_COLOR_B_CB, black_color->color_b_cb,
+ OTG_BLACK_COLOR_G_Y, black_color->color_g_y,
+ OTG_BLACK_COLOR_R_CR, black_color->color_r_cr);
+}
+
+static bool tgn10_validate_timing(
+ struct timing_generator *tg,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t interlace_factor;
+ uint32_t v_blank;
+ uint32_t h_blank;
+ uint32_t min_v_blank;
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ ASSERT(timing != NULL);
+
+ interlace_factor = timing->flags.INTERLACE ? 2 : 1;
+ v_blank = (timing->v_total - timing->v_addressable -
+ timing->v_border_top - timing->v_border_bottom) *
+ interlace_factor;
+
+ h_blank = (timing->h_total - timing->h_addressable -
+ timing->h_border_right -
+ timing->h_border_left);
+
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
+ return false;
+
+ /* Temporarily blocking interlacing mode until it's supported */
+ if (timing->flags.INTERLACE == 1)
+ return false;
+
+ /* Check maximum number of pixels supported by Timing Generator
+ * (Currently will never fail, in order to fail needs display which
+ * needs more than 8192 horizontal and
+ * more than 8192 vertical total pixels)
+ */
+ if (timing->h_total > tgn10->max_h_total ||
+ timing->v_total > tgn10->max_v_total)
+ return false;
+
+
+ if (h_blank < tgn10->min_h_blank)
+ return false;
+
+ if (timing->h_sync_width < tgn10->min_h_sync_width ||
+ timing->v_sync_width < tgn10->min_v_sync_width)
+ return false;
+
+	min_v_blank = timing->flags.INTERLACE ? tgn10->min_v_blank_interlace : tgn10->min_v_blank;
+
+ if (v_blank < min_v_blank)
+ return false;
+
+ return true;
+
+}
+
+/*
+ * get_vblank_counter
+ *
+ * @brief
+ * Get the counter of vertical blanks. Uses register OTG_STATUS_FRAME_COUNT,
+ * which holds the counter of frames.
+ *
+ * @param
+ * struct timing_generator *tg - [in] timing generator which controls the
+ * desired CRTC
+ *
+ * @return
+ * Counter of frames, which should equal the number of vblanks.
+ */
+static uint32_t tgn10_get_vblank_counter(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t frame_count;
+
+ REG_GET(OTG_STATUS_FRAME_COUNT,
+ OTG_FRAME_COUNT, &frame_count);
+
+ return frame_count;
+}
+
+static void tgn10_lock(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET(OTG_GLOBAL_CONTROL0, 0,
+ OTG_MASTER_UPDATE_LOCK_SEL, tg->inst);
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+
+ if (tg->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 100);
+}
+
+static void tgn10_unlock(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 0);
+
+ /* why are we waiting here? */
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_UPDATE_PENDING, 0,
+ 1, 100000);
+}
+
+static void tgn10_get_position(struct timing_generator *tg,
+ struct crtc_position *position)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_GET_2(OTG_STATUS_POSITION,
+ OTG_HORZ_COUNT, &position->horizontal_count,
+ OTG_VERT_COUNT, &position->vertical_count);
+
+ REG_GET(OTG_NOM_VERT_POSITION,
+ OTG_VERT_COUNT_NOM, &position->nominal_vcount);
+}
+
+static bool tgn10_is_counter_moving(struct timing_generator *tg)
+{
+ struct crtc_position position1, position2;
+
+ tg->funcs->get_position(tg, &position1);
+ tg->funcs->get_position(tg, &position2);
+
+ if (position1.horizontal_count == position2.horizontal_count &&
+ position1.vertical_count == position2.vertical_count)
+ return false;
+ else
+ return true;
+}
+
+static bool tgn10_did_triggered_reset_occur(
+ struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t occurred;
+
+ REG_GET(OTG_FORCE_COUNT_NOW_CNTL,
+ OTG_FORCE_COUNT_NOW_OCCURRED, &occurred);
+
+ return occurred != 0;
+}
+
+static void tgn10_enable_reset_trigger(struct timing_generator *tg, int source_tg_inst)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ uint32_t falling_edge;
+
+ REG_GET(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, &falling_edge);
+
+ if (falling_edge)
+ REG_SET_3(OTG_TRIGA_CNTL, 0,
+ /* vsync signal from selected OTG pipe based
+ * on OTG_TRIG_SOURCE_PIPE_SELECT setting
+ */
+ OTG_TRIGA_SOURCE_SELECT, 20,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
+ /* always detect falling edge */
+ OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 1);
+ else
+ REG_SET_3(OTG_TRIGA_CNTL, 0,
+ /* vsync signal from selected OTG pipe based
+ * on OTG_TRIG_SOURCE_PIPE_SELECT setting
+ */
+ OTG_TRIGA_SOURCE_SELECT, 20,
+ OTG_TRIGA_SOURCE_PIPE_SELECT, source_tg_inst,
+ /* always detect rising edge */
+ OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1);
+
+ REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
+ /* force H count to H_TOTAL and V count to V_TOTAL in
+ * progressive mode and V_TOTAL-1 in interlaced mode
+ */
+ OTG_FORCE_COUNT_NOW_MODE, 2);
+}
+
+static void tgn10_disable_reset_trigger(struct timing_generator *tg)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_WRITE(OTG_TRIGA_CNTL, 0);
+
+ REG_SET(OTG_FORCE_COUNT_NOW_CNTL, 0,
+ OTG_FORCE_COUNT_NOW_CLEAR, 1);
+}
+
+static void tgn10_wait_for_state(struct timing_generator *tg,
+ enum crtc_state state)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ switch (state) {
+ case CRTC_STATE_VBLANK:
+ REG_WAIT(OTG_STATUS,
+ OTG_V_BLANK, 1,
+ 1, 100000); /* 1 vupdate at 10hz */
+ break;
+
+ case CRTC_STATE_VACTIVE:
+ REG_WAIT(OTG_STATUS,
+ OTG_V_ACTIVE_DISP, 1,
+ 1, 100000); /* 1 vupdate at 10hz */
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void tgn10_set_early_control(
+ struct timing_generator *tg,
+ uint32_t early_cntl)
+{
+	/* ASIC design change: this control is no longer needed.
+	 * Left empty to keep the shared caller logic.
+	 */
+}
+
+
+static void tgn10_set_static_screen_control(
+ struct timing_generator *tg,
+ uint32_t value)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ /* Bit 8 is no longer applicable in RV for PSR case,
+ * set bit 8 to 0 if given
+ */
+ if ((value & STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN)
+ != 0)
+ value = value &
+ ~STATIC_SCREEN_EVENT_MASK_RANGETIMING_DOUBLE_BUFFER_UPDATE_EN;
+
+ REG_SET_2(OTG_STATIC_SCREEN_CONTROL, 0,
+ OTG_STATIC_SCREEN_EVENT_MASK, value,
+ OTG_STATIC_SCREEN_FRAME_COUNT, 2);
+}
+
+
+/**
+ *****************************************************************************
+ * Function: set_drr
+ *
+ * @brief
+ * Program dynamic refresh rate registers m_OTGx_OTG_V_TOTAL_*.
+ *
+ *****************************************************************************
+ */
+static void tgn10_set_drr(
+ struct timing_generator *tg,
+ const struct drr_params *params)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
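+	/* When valid min/max totals are given, program OTG_V_TOTAL_MIN/MAX
+	 * (as value - 1) and enable the min/max selects so the OTG can vary
+	 * V_TOTAL per frame; otherwise clear the DRR controls.
+	 */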
+ if (params != NULL &&
+ params->vertical_total_max > 0 &&
+ params->vertical_total_min > 0) {
+
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, params->vertical_total_max - 1);
+
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, params->vertical_total_min - 1);
+
+ REG_UPDATE_5(OTG_V_TOTAL_CONTROL,
+ OTG_V_TOTAL_MIN_SEL, 1,
+ OTG_V_TOTAL_MAX_SEL, 1,
+ OTG_FORCE_LOCK_ON_EVENT, 0,
+ OTG_SET_V_TOTAL_MIN_MASK_EN, 0,
+ OTG_SET_V_TOTAL_MIN_MASK, 0);
+ } else {
+ REG_SET(OTG_V_TOTAL_MIN, 0,
+ OTG_V_TOTAL_MIN, 0);
+
+ REG_SET(OTG_V_TOTAL_MAX, 0,
+ OTG_V_TOTAL_MAX, 0);
+
+ REG_UPDATE_4(OTG_V_TOTAL_CONTROL,
+ OTG_SET_V_TOTAL_MIN_MASK, 0,
+ OTG_V_TOTAL_MIN_SEL, 0,
+ OTG_V_TOTAL_MAX_SEL, 0,
+ OTG_FORCE_LOCK_ON_EVENT, 0);
+ }
+}
+
+static void tgn10_set_test_pattern(
+ struct timing_generator *tg,
+ /* TODO: replace 'controller_dp_test_pattern' by 'test_pattern_mode'
+ * because this is not DP-specific (which is probably somewhere in DP
+ * encoder) */
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ enum test_pattern_color_format bit_depth;
+ enum test_pattern_dyn_range dyn_range;
+ enum test_pattern_mode mode;
+ uint32_t pattern_mask;
+ uint32_t pattern_data;
+	/* color ramp generator mixes 16-bit color */
+ uint32_t src_bpc = 16;
+ /* requested bpc */
+ uint32_t dst_bpc;
+ uint32_t index;
+ /* RGB values of the color bars.
+ * Produce two RGB colors: RGB0 - white (all Fs)
+ * and RGB1 - black (all 0s)
+ * (three RGB components for two colors)
+ */
+ uint16_t src_color[6] = {0xFFFF, 0xFFFF, 0xFFFF, 0x0000,
+ 0x0000, 0x0000};
+ /* dest color (converted to the specified color format) */
+ uint16_t dst_color[6];
+ uint32_t inc_base;
+
+ /* translate to bit depth */
+ switch (color_depth) {
+ case COLOR_DEPTH_666:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_6;
+ break;
+ case COLOR_DEPTH_888:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_12;
+ break;
+ default:
+ bit_depth = TEST_PATTERN_COLOR_FORMAT_BPC_8;
+ break;
+ }
+
+ switch (test_pattern) {
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
+ case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
+ {
+ dyn_range = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA ?
+ TEST_PATTERN_DYN_RANGE_CEA :
+ TEST_PATTERN_DYN_RANGE_VESA);
+ mode = TEST_PATTERN_MODE_COLORSQUARES_RGB;
+
+ REG_UPDATE_2(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_VRES, 6,
+ OTG_TEST_PATTERN_HRES, 6);
+
+ REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, dyn_range,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_VERTICALBARS:
+ case CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS:
+ {
+ mode = (test_pattern ==
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS ?
+ TEST_PATTERN_MODE_VERTICALBARS :
+ TEST_PATTERN_MODE_HORIZONTALBARS);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* adjust color to the required colorFormat */
+ for (index = 0; index < 6; index++) {
+ /* dst = 2^dstBpc * src / 2^srcBpc = src >>
+ * (srcBpc - dstBpc);
+ */
+ dst_color[index] =
+ src_color[index] >> (src_bpc - dst_bpc);
+			/* CRTC_TEST_PATTERN_DATA has 16 bits;
+			 * the lowest 6 are hardwired to ZERO.
+			 * Color bits should be left-aligned to the MSB:
+			 * XXXXXXXXXX000000 for 10 bit,
+			 * XXXXXXXX00000000 for 8 bit and XXXXXX0000000000 for 6 bit.
+			 */
+ dst_color[index] <<= (16 - dst_bpc);
+ }
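+		/* e.g. for 8 bpc: 0xFFFF >> (16 - 8) = 0xFF, then
+		 * 0xFF << (16 - 8) = 0xFF00, matching the 0xFF00 values in the
+		 * example write sequence described below.
+		 */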
+
+ REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0);
+
+ /* We have to write the mask before data, similar to pipeline.
+ * For example, for 8 bpc, if we want RGB0 to be magenta,
+ * and RGB1 to be cyan,
+ * we need to make 7 writes:
+ * MASK DATA
+ * 000001 00000000 00000000 set mask to R0
+ * 000010 11111111 00000000 R0 255, 0xFF00, set mask to G0
+ * 000100 00000000 00000000 G0 0, 0x0000, set mask to B0
+ * 001000 11111111 00000000 B0 255, 0xFF00, set mask to R1
+ * 010000 00000000 00000000 R1 0, 0x0000, set mask to G1
+ * 100000 11111111 00000000 G1 255, 0xFF00, set mask to B1
+ * 100000 11111111 00000000 B1 255, 0xFF00
+ *
+		 * We make a loop of 6 iterations in which we prepare the mask,
+		 * then write, then prepare the color for the next write.
+		 * The first iteration writes the mask only, but on each
+		 * following iteration the color prepared in the previous
+		 * iteration is written within the new mask. The last component
+		 * is written separately: the mask does not change between the
+		 * 6th and 7th write and its color is prepared by the last
+		 * iteration.
+ */
+
+ /* write color, color values mask in CRTC_TEST_PATTERN_MASK
+ * is B1, G1, R1, B0, G0, R0
+ */
+ pattern_data = 0;
+ for (index = 0; index < 6; index++) {
+ /* prepare color mask, first write PATTERN_DATA
+ * will have all zeros
+ */
+ pattern_mask = (1 << index);
+
+ /* write color component */
+ REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
+ OTG_TEST_PATTERN_MASK, pattern_mask,
+ OTG_TEST_PATTERN_DATA, pattern_data);
+
+ /* prepare next color component,
+ * will be written in the next iteration
+ */
+ pattern_data = dst_color[index];
+ }
+ /* write last color component,
+ * it's been already prepared in the loop
+ */
+ REG_SET_2(OTG_TEST_PATTERN_COLOR, 0,
+ OTG_TEST_PATTERN_MASK, pattern_mask,
+ OTG_TEST_PATTERN_DATA, pattern_data);
+
+ /* enable test pattern */
+ REG_UPDATE_4(OTG_TEST_PATTERN_CONTROL,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+
+ case CONTROLLER_DP_TEST_PATTERN_COLORRAMP:
+ {
+ mode = (bit_depth ==
+ TEST_PATTERN_COLOR_FORMAT_BPC_10 ?
+ TEST_PATTERN_MODE_DUALRAMP_RGB :
+ TEST_PATTERN_MODE_SINGLERAMP_RGB);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ dst_bpc = 6;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ dst_bpc = 8;
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ dst_bpc = 10;
+ break;
+ default:
+ dst_bpc = 8;
+ break;
+ }
+
+ /* increment for the first ramp for one color gradation:
+ * 1 gradation of 6-bit color corresponds to
+ * 2^10 gradations in 16-bit color
+ */
+ inc_base = (src_bpc - dst_bpc);
+
+ switch (bit_depth) {
+ case TEST_PATTERN_COLOR_FORMAT_BPC_6:
+ {
+ REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_INC0, inc_base,
+ OTG_TEST_PATTERN_INC1, 0,
+ OTG_TEST_PATTERN_HRES, 6,
+ OTG_TEST_PATTERN_VRES, 6,
+ OTG_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_8:
+ {
+ REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_INC0, inc_base,
+ OTG_TEST_PATTERN_INC1, 0,
+ OTG_TEST_PATTERN_HRES, 8,
+ OTG_TEST_PATTERN_VRES, 6,
+ OTG_TEST_PATTERN_RAMP0_OFFSET, 0);
+ }
+ break;
+ case TEST_PATTERN_COLOR_FORMAT_BPC_10:
+ {
+ REG_UPDATE_5(OTG_TEST_PATTERN_PARAMETERS,
+ OTG_TEST_PATTERN_INC0, inc_base,
+ OTG_TEST_PATTERN_INC1, inc_base + 2,
+ OTG_TEST_PATTERN_HRES, 8,
+ OTG_TEST_PATTERN_VRES, 5,
+ OTG_TEST_PATTERN_RAMP0_OFFSET, 384 << 6);
+ }
+ break;
+ default:
+ break;
+ }
+
+ REG_WRITE(OTG_TEST_PATTERN_COLOR, 0);
+
+ /* enable test pattern */
+ REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0);
+
+ REG_SET_4(OTG_TEST_PATTERN_CONTROL, 0,
+ OTG_TEST_PATTERN_EN, 1,
+ OTG_TEST_PATTERN_MODE, mode,
+ OTG_TEST_PATTERN_DYNAMIC_RANGE, 0,
+ OTG_TEST_PATTERN_COLOR_FORMAT, bit_depth);
+ }
+ break;
+ case CONTROLLER_DP_TEST_PATTERN_VIDEOMODE:
+ {
+ REG_WRITE(OTG_TEST_PATTERN_CONTROL, 0);
+ REG_WRITE(OTG_TEST_PATTERN_COLOR, 0);
+ REG_WRITE(OTG_TEST_PATTERN_PARAMETERS, 0);
+ }
+ break;
+ default:
+ break;
+
+ }
+}
+
+static void tgn10_get_crtc_scanoutpos(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+ struct crtc_position position;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, v_blank_start,
+ OTG_V_BLANK_END, v_blank_end);
+
+ tgn10_get_position(tg, &position);
+
+ *h_position = position.horizontal_count;
+ *v_position = position.vertical_count;
+}
+
+
+
+static void tgn10_enable_stereo(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
+{
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ uint32_t active_width = timing->h_addressable;
+ uint32_t space1_size = timing->v_total - timing->v_addressable;
+
+ if (flags) {
+ uint32_t stereo_en;
+ stereo_en = flags->FRAME_PACKED == 0 ? 1 : 0;
+
+ if (flags->PROGRAM_STEREO)
+ REG_UPDATE_3(OTG_STEREO_CONTROL,
+ OTG_STEREO_EN, stereo_en,
+ OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
+ OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+
+ if (flags->PROGRAM_POLARITY)
+ REG_UPDATE(OTG_STEREO_CONTROL,
+ OTG_STEREO_EYE_FLAG_POLARITY,
+ flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
+
+ if (flags->DISABLE_STEREO_DP_SYNC)
+ REG_UPDATE(OTG_STEREO_CONTROL,
+ OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1);
+
+ if (flags->PROGRAM_STEREO)
+ REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL,
+ OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED,
+ OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED,
+ OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
+
+ }
+
+ REG_UPDATE(OPPBUF_CONTROL,
+ OPPBUF_ACTIVE_WIDTH, active_width);
+
+ REG_UPDATE(OPPBUF_3D_PARAMETERS_0,
+ OPPBUF_3D_VACT_SPACE1_SIZE, space1_size);
+}
+
+static void tgn10_program_stereo(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags)
+{
+ if (flags->PROGRAM_STEREO)
+ tgn10_enable_stereo(tg, timing, flags);
+ else
+ tgn10_disable_stereo(tg);
+}
+
+
+static bool tgn10_is_stereo_left_eye(struct timing_generator *tg)
+{
+ bool ret = false;
+ uint32_t left_eye = 0;
+ struct dcn10_timing_generator *tgn10 = DCN10TG_FROM_TG(tg);
+
+ REG_GET(OTG_STEREO_STATUS,
+ OTG_STEREO_CURRENT_EYE, &left_eye);
+ if (left_eye == 1)
+ ret = true;
+ else
+ ret = false;
+
+ return ret;
+}
+
+void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+ struct dcn_otg_state *s)
+{
+ REG_GET(OTG_CONTROL,
+ OTG_MASTER_EN, &s->otg_enabled);
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &s->v_blank_start,
+ OTG_V_BLANK_END, &s->v_blank_end);
+
+ REG_GET(OTG_V_SYNC_A_CNTL,
+ OTG_V_SYNC_A_POL, &s->v_sync_a_pol);
+
+ REG_GET(OTG_V_TOTAL,
+ OTG_V_TOTAL, &s->v_total);
+
+ REG_GET(OTG_V_TOTAL_MAX,
+ OTG_V_TOTAL_MAX, &s->v_total_max);
+
+ REG_GET(OTG_V_TOTAL_MIN,
+ OTG_V_TOTAL_MIN, &s->v_total_min);
+
+ REG_GET_2(OTG_V_SYNC_A,
+ OTG_V_SYNC_A_START, &s->v_sync_a_start,
+ OTG_V_SYNC_A_END, &s->v_sync_a_end);
+
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &s->h_blank_start,
+ OTG_H_BLANK_END, &s->h_blank_end);
+
+ REG_GET_2(OTG_H_SYNC_A,
+ OTG_H_SYNC_A_START, &s->h_sync_a_start,
+ OTG_H_SYNC_A_END, &s->h_sync_a_end);
+
+ REG_GET(OTG_H_SYNC_A_CNTL,
+ OTG_H_SYNC_A_POL, &s->h_sync_a_pol);
+
+ REG_GET(OTG_H_TOTAL,
+ OTG_H_TOTAL, &s->h_total);
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status);
+}
+
+
+static const struct timing_generator_funcs dcn10_tg_funcs = {
+ .validate_timing = tgn10_validate_timing,
+ .program_timing = tgn10_program_timing,
+ .program_global_sync = tgn10_program_global_sync,
+ .enable_crtc = tgn10_enable_crtc,
+ .disable_crtc = tgn10_disable_crtc,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = tgn10_is_counter_moving,
+ .get_position = tgn10_get_position,
+ .get_frame_count = tgn10_get_vblank_counter,
+ .get_scanoutpos = tgn10_get_crtc_scanoutpos,
+ .set_early_control = tgn10_set_early_control,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = tgn10_wait_for_state,
+ .set_blank = tgn10_set_blank,
+ .is_blanked = tgn10_is_blanked,
+ .set_blank_color = tgn10_program_blank_color,
+ .did_triggered_reset_occur = tgn10_did_triggered_reset_occur,
+ .enable_reset_trigger = tgn10_enable_reset_trigger,
+ .disable_reset_trigger = tgn10_disable_reset_trigger,
+ .lock = tgn10_lock,
+ .unlock = tgn10_unlock,
+ .enable_optc_clock = tgn10_enable_optc_clock,
+ .set_drr = tgn10_set_drr,
+ .set_static_screen_control = tgn10_set_static_screen_control,
+ .set_test_pattern = tgn10_set_test_pattern,
+ .program_stereo = tgn10_program_stereo,
+ .is_stereo_left_eye = tgn10_is_stereo_left_eye
+};
+
+void dcn10_timing_generator_init(struct dcn10_timing_generator *tgn10)
+{
+ tgn10->base.funcs = &dcn10_tg_funcs;
+
+ tgn10->max_h_total = tgn10->tg_mask->OTG_H_TOTAL + 1;
+ tgn10->max_v_total = tgn10->tg_mask->OTG_V_TOTAL + 1;
+
+ tgn10->min_h_blank = 32;
+ tgn10->min_v_blank = 3;
+ tgn10->min_v_blank_interlace = 5;
+ tgn10->min_h_sync_width = 8;
+ tgn10->min_v_sync_width = 1;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
new file mode 100644
index 000000000000..7d4818d7aa31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_TIMING_GENERATOR_DCN10_H__
+#define __DC_TIMING_GENERATOR_DCN10_H__
+
+#include "timing_generator.h"
+
+#define DCN10TG_FROM_TG(tg)\
+ container_of(tg, struct dcn10_timing_generator, base)
+
+#define TG_COMMON_REG_LIST_DCN(inst) \
+ SRI(OTG_VSTARTUP_PARAM, OTG, inst),\
+ SRI(OTG_VUPDATE_PARAM, OTG, inst),\
+ SRI(OTG_VREADY_PARAM, OTG, inst),\
+ SRI(OTG_BLANK_CONTROL, OTG, inst),\
+ SRI(OTG_MASTER_UPDATE_LOCK, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL0, OTG, inst),\
+ SRI(OTG_DOUBLE_BUFFER_CONTROL, OTG, inst),\
+ SRI(OTG_H_TOTAL, OTG, inst),\
+ SRI(OTG_H_BLANK_START_END, OTG, inst),\
+ SRI(OTG_H_SYNC_A, OTG, inst),\
+ SRI(OTG_H_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_H_TIMING_CNTL, OTG, inst),\
+ SRI(OTG_V_TOTAL, OTG, inst),\
+ SRI(OTG_V_BLANK_START_END, OTG, inst),\
+ SRI(OTG_V_SYNC_A, OTG, inst),\
+ SRI(OTG_V_SYNC_A_CNTL, OTG, inst),\
+ SRI(OTG_INTERLACE_CONTROL, OTG, inst),\
+ SRI(OTG_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_CONTROL, OTG, inst),\
+ SRI(OTG_3D_STRUCTURE_CONTROL, OTG, inst),\
+ SRI(OTG_STEREO_STATUS, OTG, inst),\
+ SRI(OTG_V_TOTAL_MAX, OTG, inst),\
+ SRI(OTG_V_TOTAL_MIN, OTG, inst),\
+ SRI(OTG_V_TOTAL_CONTROL, OTG, inst),\
+ SRI(OTG_TRIGA_CNTL, OTG, inst),\
+ SRI(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\
+ SRI(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\
+ SRI(OTG_STATUS_FRAME_COUNT, OTG, inst),\
+ SRI(OTG_STATUS, OTG, inst),\
+ SRI(OTG_STATUS_POSITION, OTG, inst),\
+ SRI(OTG_NOM_VERT_POSITION, OTG, inst),\
+ SRI(OTG_BLACK_COLOR, OTG, inst),\
+ SRI(OTG_CLOCK_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\
+ SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\
+ SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\
+ SRI(OPTC_DATA_SOURCE_SELECT, ODM, inst),\
+ SRI(OPTC_INPUT_GLOBAL_CONTROL, ODM, inst),\
+ SRI(OPPBUF_CONTROL, OPPBUF, inst),\
+ SRI(OPPBUF_3D_PARAMETERS_0, OPPBUF, inst),\
+ SRI(CONTROL, VTG, inst)
+
+#define TG_COMMON_REG_LIST_DCN1_0(inst) \
+ TG_COMMON_REG_LIST_DCN(inst),\
+ SRI(OTG_TEST_PATTERN_PARAMETERS, OTG, inst),\
+ SRI(OTG_TEST_PATTERN_CONTROL, OTG, inst),\
+ SRI(OTG_TEST_PATTERN_COLOR, OTG, inst)
+
+
+struct dcn_tg_registers {
+ uint32_t OTG_VSTARTUP_PARAM;
+ uint32_t OTG_VUPDATE_PARAM;
+ uint32_t OTG_VREADY_PARAM;
+ uint32_t OTG_BLANK_CONTROL;
+ uint32_t OTG_MASTER_UPDATE_LOCK;
+ uint32_t OTG_GLOBAL_CONTROL0;
+ uint32_t OTG_DOUBLE_BUFFER_CONTROL;
+ uint32_t OTG_H_TOTAL;
+ uint32_t OTG_H_BLANK_START_END;
+ uint32_t OTG_H_SYNC_A;
+ uint32_t OTG_H_SYNC_A_CNTL;
+ uint32_t OTG_H_TIMING_CNTL;
+ uint32_t OTG_V_TOTAL;
+ uint32_t OTG_V_BLANK_START_END;
+ uint32_t OTG_V_SYNC_A;
+ uint32_t OTG_V_SYNC_A_CNTL;
+ uint32_t OTG_INTERLACE_CONTROL;
+ uint32_t OTG_CONTROL;
+ uint32_t OTG_STEREO_CONTROL;
+ uint32_t OTG_3D_STRUCTURE_CONTROL;
+ uint32_t OTG_STEREO_STATUS;
+ uint32_t OTG_V_TOTAL_MAX;
+ uint32_t OTG_V_TOTAL_MIN;
+ uint32_t OTG_V_TOTAL_CONTROL;
+ uint32_t OTG_TRIGA_CNTL;
+ uint32_t OTG_FORCE_COUNT_NOW_CNTL;
+ uint32_t OTG_STATIC_SCREEN_CONTROL;
+ uint32_t OTG_STATUS_FRAME_COUNT;
+ uint32_t OTG_STATUS;
+ uint32_t OTG_STATUS_POSITION;
+ uint32_t OTG_NOM_VERT_POSITION;
+ uint32_t OTG_BLACK_COLOR;
+ uint32_t OTG_TEST_PATTERN_PARAMETERS;
+ uint32_t OTG_TEST_PATTERN_CONTROL;
+ uint32_t OTG_TEST_PATTERN_COLOR;
+ uint32_t OTG_CLOCK_CONTROL;
+ uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL;
+ uint32_t OTG_VERTICAL_INTERRUPT2_POSITION;
+ uint32_t OPTC_INPUT_CLOCK_CONTROL;
+ uint32_t OPTC_DATA_SOURCE_SELECT;
+ uint32_t OPTC_INPUT_GLOBAL_CONTROL;
+ uint32_t OPPBUF_CONTROL;
+ uint32_t OPPBUF_3D_PARAMETERS_0;
+ uint32_t CONTROL;
+};
+
+#define TG_COMMON_MASK_SH_LIST_DCN(mask_sh)\
+ SF(OTG0_OTG_VSTARTUP_PARAM, VSTARTUP_START, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_OFFSET, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_PARAM, VUPDATE_WIDTH, mask_sh),\
+ SF(OTG0_OTG_VREADY_PARAM, VREADY_OFFSET, mask_sh),\
+ SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DATA_EN, mask_sh),\
+ SF(OTG0_OTG_BLANK_CONTROL, OTG_BLANK_DE_MODE, mask_sh),\
+ SF(OTG0_OTG_BLANK_CONTROL, OTG_CURRENT_BLANK_STATE, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, OTG_MASTER_UPDATE_LOCK, mask_sh),\
+ SF(OTG0_OTG_MASTER_UPDATE_LOCK, UPDATE_LOCK_STATUS, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\
+ SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A, OTG_H_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL, OTG_V_TOTAL, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_START, mask_sh),\
+ SF(OTG0_OTG_V_BLANK_START_END, OTG_V_BLANK_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_START, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A, OTG_V_SYNC_A_END, mask_sh),\
+ SF(OTG0_OTG_V_SYNC_A_CNTL, OTG_V_SYNC_A_POL, mask_sh),\
+ SF(OTG0_OTG_INTERLACE_CONTROL, OTG_INTERLACE_ENABLE, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_MASTER_EN, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_START_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EYE_FLAG_POLARITY, mask_sh),\
+ SF(OTG0_OTG_STEREO_CONTROL, OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, mask_sh),\
+ SF(OTG0_OTG_STEREO_STATUS, OTG_STEREO_CURRENT_EYE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_EN, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_V_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_3D_STRUCTURE_CONTROL, OTG_3D_STRUCTURE_STEREO_SEL_OVR, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MAX, OTG_V_TOTAL_MAX, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MIN_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_V_TOTAL_MAX_SEL, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_FORCE_LOCK_ON_EVENT, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK_EN, mask_sh),\
+ SF(OTG0_OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_CLEAR, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_MODE, mask_sh),\
+ SF(OTG0_OTG_FORCE_COUNT_NOW_CNTL, OTG_FORCE_COUNT_NOW_OCCURRED, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_SOURCE_PIPE_SELECT, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_RISING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_TRIGA_CNTL, OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_EVENT_MASK, mask_sh),\
+ SF(OTG0_OTG_STATIC_SCREEN_CONTROL, OTG_STATIC_SCREEN_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_FRAME_COUNT, OTG_FRAME_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_BLANK, mask_sh),\
+ SF(OTG0_OTG_STATUS, OTG_V_ACTIVE_DISP, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_HORZ_COUNT, mask_sh),\
+ SF(OTG0_OTG_STATUS_POSITION, OTG_VERT_COUNT, mask_sh),\
+ SF(OTG0_OTG_NOM_VERT_POSITION, OTG_VERT_COUNT_NOM, mask_sh),\
+ SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_B_CB, mask_sh),\
+ SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_G_Y, mask_sh),\
+ SF(OTG0_OTG_BLACK_COLOR, OTG_BLACK_COLOR_R_CR, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_BUSY, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\
+ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\
+ SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_ON, mask_sh),\
+ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_GATE_DIS, mask_sh),\
+ SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
+ SF(OPPBUF0_OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, mask_sh),\
+ SF(OPPBUF0_OPPBUF_3D_PARAMETERS_0, OPPBUF_3D_VACT_SPACE1_SIZE, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
+ SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh)
+
+#define TG_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
+ TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC0, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_INC1, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_VRES, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_HRES, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_PARAMETERS, OTG_TEST_PATTERN_RAMP0_OFFSET, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_EN, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_MODE, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_DYNAMIC_RANGE, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_CONTROL, OTG_TEST_PATTERN_COLOR_FORMAT, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_MASK, mask_sh),\
+ SF(OTG0_OTG_TEST_PATTERN_COLOR, OTG_TEST_PATTERN_DATA, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SRC_SEL, mask_sh)
+
+#define TG_REG_FIELD_LIST(type) \
+ type VSTARTUP_START;\
+ type VUPDATE_OFFSET;\
+ type VUPDATE_WIDTH;\
+ type VREADY_OFFSET;\
+ type OTG_BLANK_DATA_EN;\
+ type OTG_BLANK_DE_MODE;\
+ type OTG_CURRENT_BLANK_STATE;\
+ type OTG_MASTER_UPDATE_LOCK;\
+ type UPDATE_LOCK_STATUS;\
+ type OTG_UPDATE_PENDING;\
+ type OTG_MASTER_UPDATE_LOCK_SEL;\
+ type OTG_BLANK_DATA_DOUBLE_BUFFER_EN;\
+ type OTG_H_TOTAL;\
+ type OTG_H_BLANK_START;\
+ type OTG_H_BLANK_END;\
+ type OTG_H_SYNC_A_START;\
+ type OTG_H_SYNC_A_END;\
+ type OTG_H_SYNC_A_POL;\
+ type OTG_H_TIMING_DIV_BY2;\
+ type OTG_V_TOTAL;\
+ type OTG_V_BLANK_START;\
+ type OTG_V_BLANK_END;\
+ type OTG_V_SYNC_A_START;\
+ type OTG_V_SYNC_A_END;\
+ type OTG_V_SYNC_A_POL;\
+ type OTG_INTERLACE_ENABLE;\
+ type OTG_MASTER_EN;\
+ type OTG_START_POINT_CNTL;\
+ type OTG_DISABLE_POINT_CNTL;\
+ type OTG_FIELD_NUMBER_CNTL;\
+ type OTG_STEREO_EN;\
+ type OTG_STEREO_SYNC_OUTPUT_LINE_NUM;\
+ type OTG_STEREO_SYNC_OUTPUT_POLARITY;\
+ type OTG_STEREO_EYE_FLAG_POLARITY;\
+ type OTG_STEREO_CURRENT_EYE;\
+ type OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP;\
+ type OTG_3D_STRUCTURE_EN;\
+ type OTG_3D_STRUCTURE_V_UPDATE_MODE;\
+ type OTG_3D_STRUCTURE_STEREO_SEL_OVR;\
+ type OTG_V_TOTAL_MAX;\
+ type OTG_V_TOTAL_MIN;\
+ type OTG_V_TOTAL_MIN_SEL;\
+ type OTG_V_TOTAL_MAX_SEL;\
+ type OTG_FORCE_LOCK_ON_EVENT;\
+ type OTG_SET_V_TOTAL_MIN_MASK_EN;\
+ type OTG_SET_V_TOTAL_MIN_MASK;\
+ type OTG_FORCE_COUNT_NOW_CLEAR;\
+ type OTG_FORCE_COUNT_NOW_MODE;\
+ type OTG_FORCE_COUNT_NOW_OCCURRED;\
+ type OTG_TRIGA_SOURCE_SELECT;\
+ type OTG_TRIGA_SOURCE_PIPE_SELECT;\
+ type OTG_TRIGA_RISING_EDGE_DETECT_CNTL;\
+ type OTG_TRIGA_FALLING_EDGE_DETECT_CNTL;\
+ type OTG_STATIC_SCREEN_EVENT_MASK;\
+ type OTG_STATIC_SCREEN_FRAME_COUNT;\
+ type OTG_FRAME_COUNT;\
+ type OTG_V_BLANK;\
+ type OTG_V_ACTIVE_DISP;\
+ type OTG_HORZ_COUNT;\
+ type OTG_VERT_COUNT;\
+ type OTG_VERT_COUNT_NOM;\
+ type OTG_BLACK_COLOR_B_CB;\
+ type OTG_BLACK_COLOR_G_Y;\
+ type OTG_BLACK_COLOR_R_CR;\
+ type OTG_TEST_PATTERN_INC0;\
+ type OTG_TEST_PATTERN_INC1;\
+ type OTG_TEST_PATTERN_VRES;\
+ type OTG_TEST_PATTERN_HRES;\
+ type OTG_TEST_PATTERN_RAMP0_OFFSET;\
+ type OTG_TEST_PATTERN_EN;\
+ type OTG_TEST_PATTERN_MODE;\
+ type OTG_TEST_PATTERN_DYNAMIC_RANGE;\
+ type OTG_TEST_PATTERN_COLOR_FORMAT;\
+ type OTG_TEST_PATTERN_MASK;\
+ type OTG_TEST_PATTERN_DATA;\
+ type OTG_BUSY;\
+ type OTG_CLOCK_EN;\
+ type OTG_CLOCK_ON;\
+ type OTG_CLOCK_GATE_DIS;\
+ type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\
+ type OTG_VERTICAL_INTERRUPT2_LINE_START;\
+ type OPTC_INPUT_CLK_EN;\
+ type OPTC_INPUT_CLK_ON;\
+ type OPTC_INPUT_CLK_GATE_DIS;\
+ type OPTC_SRC_SEL;\
+ type OPTC_SEG0_SRC_SEL;\
+ type OPTC_UNDERFLOW_OCCURRED_STATUS;\
+ type OPPBUF_ACTIVE_WIDTH;\
+ type OPPBUF_3D_VACT_SPACE1_SIZE;\
+ type VTG0_ENABLE;\
+ type VTG0_FP2;\
+ type VTG0_VCOUNT_INIT;
+
+struct dcn_tg_shift {
+ TG_REG_FIELD_LIST(uint8_t)
+};
+
+struct dcn_tg_mask {
+ TG_REG_FIELD_LIST(uint32_t)
+};
+
+struct dcn10_timing_generator {
+ struct timing_generator base;
+
+ const struct dcn_tg_registers *tg_regs;
+ const struct dcn_tg_shift *tg_shift;
+ const struct dcn_tg_mask *tg_mask;
+
+ enum controller_id controller_id;
+
+ uint32_t max_h_total;
+ uint32_t max_v_total;
+
+ uint32_t min_h_blank;
+
+ uint32_t min_h_sync_width;
+ uint32_t min_v_sync_width;
+ uint32_t min_v_blank;
+ uint32_t min_v_blank_interlace;
+};
+
+void dcn10_timing_generator_init(struct dcn10_timing_generator *tg);
+
+struct dcn_otg_state {
+ uint32_t v_blank_start;
+ uint32_t v_blank_end;
+ uint32_t v_sync_a_pol;
+ uint32_t v_total;
+ uint32_t v_total_max;
+ uint32_t v_total_min;
+ uint32_t v_sync_a_start;
+ uint32_t v_sync_a_end;
+ uint32_t h_blank_start;
+ uint32_t h_blank_end;
+ uint32_t h_sync_a_start;
+ uint32_t h_sync_a_end;
+ uint32_t h_sync_a_pol;
+ uint32_t h_total;
+ uint32_t underflow_occurred_status;
+ uint32_t otg_enabled;
+};
+
+void tgn10_read_otg_state(struct dcn10_timing_generator *tgn10,
+ struct dcn_otg_state *s);
+
+#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
new file mode 100644
index 000000000000..ab88f07772a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * This file defines helper functions provided by the Display Manager to
+ * Display Core.
+ */
+#ifndef __DM_HELPERS__
+#define __DM_HELPERS__
+
+#include "dc_types.h"
+#include "dc.h"
+
+struct dp_mst_stream_allocation_table;
+
+enum dc_edid_status dm_helpers_parse_edid_caps(
+ struct dc_context *ctx,
+ const struct dc_edid *edid,
+ struct dc_edid_caps *edid_caps);
+
+/*
+ * Writes payload allocation table in immediate downstream device.
+ */
+bool dm_helpers_dp_mst_write_payload_allocation_table(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ struct dp_mst_stream_allocation_table *proposed_table,
+ bool enable);
+
+/*
+ * Polls for ACT (allocation change trigger) to be handled.
+ */
+bool dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream);
+/*
+ * Sends ALLOCATE_PAYLOAD message.
+ */
+bool dm_helpers_dp_mst_send_payload_allocation(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream,
+ bool enable);
+
+bool dm_helpers_dp_mst_start_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ bool boot);
+
+void dm_helpers_dp_mst_stop_top_mgr(
+ struct dc_context *ctx,
+ const struct dc_link *link);
+/**
+ * OS specific aux read callback.
+ */
+bool dm_helpers_dp_read_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+
+/**
+ * OS specific aux write callback.
+ */
+bool dm_helpers_dp_write_dpcd(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+
+bool dm_helpers_submit_i2c(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ struct i2c_command *cmd);
+
+enum dc_edid_status dm_helpers_read_local_edid(
+ struct dc_context *ctx,
+ struct dc_link *link,
+ struct dc_sink *sink);
+
+
+#endif /* __DM_HELPERS__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
new file mode 100644
index 000000000000..bbfa83252fc1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DM_PP_SMU_IF__H
+#define DM_PP_SMU_IF__H
+
+/*
+ * interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
+ */
+
+
+struct pp_smu {
+ struct dc_context *ctx;
+};
+
+enum wm_set_id {
+ WM_A,
+ WM_B,
+ WM_C,
+ WM_D,
+ WM_COUNT,
+};
+
+struct pp_smu_wm_set_range {
+ enum wm_set_id wm_inst;
+ uint32_t min_fill_clk_khz;
+ uint32_t max_fill_clk_khz;
+ uint32_t min_drain_clk_khz;
+ uint32_t max_drain_clk_khz;
+};
+
+struct pp_smu_wm_range_sets {
+ uint32_t num_reader_wm_sets;
+ struct pp_smu_wm_set_range reader_wm_sets[WM_COUNT];
+
+ uint32_t num_writer_wm_sets;
+ struct pp_smu_wm_set_range writer_wm_sets[WM_COUNT];
+};
+
+struct pp_smu_display_requirement_rv {
+ /* PPSMC_MSG_SetDisplayCount: count
+ * 0 triggers S0i2 optimization
+ */
+ unsigned int display_count;
+
+ /* PPSMC_MSG_SetHardMinFclkByFreq: khz
+ * FCLK will vary with DPM, but never below requested hard min
+ */
+ unsigned int hard_min_fclk_khz;
+
+ /* PPSMC_MSG_SetHardMinDcefclkByFreq: khz
+ * fixed clock at requested freq, either from FCH bypass or DFS
+ */
+ unsigned int hard_min_dcefclk_khz;
+
+ /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
+ * when DF is in cstate, dcf clock is further divided down
+ * to just above given frequency
+ */
+ unsigned int min_deep_sleep_dcefclk_mhz;
+};
+
+struct pp_smu_funcs_rv {
+ struct pp_smu pp_smu;
+
+ void (*set_display_requirement)(struct pp_smu *pp,
+ struct pp_smu_display_requirement_rv *req);
+
+ /* which SMU message? are reader and writer WM separate SMU msg? */
+ void (*set_wm_ranges)(struct pp_smu *pp,
+ struct pp_smu_wm_range_sets *ranges);
+
+};
+
+#if 0
+struct pp_smu_funcs_rv {
+
+ /* PPSMC_MSG_SetDisplayCount
+ * 0 triggers S0i2 optimization
+ */
+ void (*set_display_count)(struct pp_smu *pp, int count);
+
+ /* PPSMC_MSG_SetHardMinFclkByFreq
+ * FCLK will vary with DPM, but never below requested hard min
+ */
+ void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
+
+ /* PPSMC_MSG_SetHardMinDcefclkByFreq
+ * fixed clock at requested freq, either from FCH bypass or DFS
+ */
+ void (*set_hard_min_dcefclk_by_freq)(struct pp_smu *pp, int khz);
+
+ /* PPSMC_MSG_SetMinDeepSleepDcefclk
+ * when DF is in cstate, dcf clock is further divided down
+ * to just above given frequency
+ */
+ void (*set_min_deep_sleep_dcefclk)(struct pp_smu *pp, int mhz);
+
+ /* todo: aesthetic
+ * watermark range table
+ */
+
+ /* todo: functional/feature
+ * PPSMC_MSG_SetHardMinSocclkByFreq: required to support DWB
+ */
+};
+#endif
+
+#endif /* DM_PP_SMU_IF__H */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
new file mode 100644
index 000000000000..d4917037ac42
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * This file defines external dependencies of Display Core.
+ */
+
+#ifndef __DM_SERVICES_H__
+
+#define __DM_SERVICES_H__
+
+/* TODO: remove when DC is complete. */
+#include "dm_services_types.h"
+#include "logger_interface.h"
+#include "link_service_types.h"
+
+#undef DEPRECATED
+
+irq_handler_idx dm_register_interrupt(
+ struct dc_context *ctx,
+ struct dc_interrupt_params *int_params,
+ interrupt_handler ih,
+ void *handler_args);
+
+
+/*
+ *
+ * GPU registers access
+ *
+ */
+
+/* enable for debugging new code; this adds 50k to the driver size. */
+/* #define DM_CHECK_ADDR_0 */
+
+#define dm_read_reg(ctx, address) \
+ dm_read_reg_func(ctx, address, __func__)
+
+static inline uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name)
+{
+ uint32_t value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+ value = cgs_read_register(ctx->cgs_device, address);
+
+ return value;
+}
+
+#define dm_write_reg(ctx, address, value) \
+ dm_write_reg_func(ctx, address, value, __func__)
+
+static inline void dm_write_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ uint32_t value,
+ const char *func_name)
+{
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register write. address = 0");
+ return;
+ }
+#endif
+ cgs_write_register(ctx->cgs_device, address, value);
+}
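+/* A minimal read-modify-write sketch built from these helpers; REG_ADDR,
+ * FIELD_MASK and FIELD_SHIFT are hypothetical placeholders rather than
+ * real register definitions:
+ *
+ *	uint32_t v = dm_read_reg(ctx, REG_ADDR);
+ *
+ *	v = set_reg_field_value_ex(v, 1, FIELD_MASK, FIELD_SHIFT);
+ *	dm_write_reg(ctx, REG_ADDR, v);
+ *
+ * set_reg_field_value_ex() is defined later in this header.
+ */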
+
+static inline uint32_t dm_read_index_reg(
+ const struct dc_context *ctx,
+ enum cgs_ind_reg addr_space,
+ uint32_t index)
+{
+ return cgs_read_ind_register(ctx->cgs_device, addr_space, index);
+}
+
+static inline void dm_write_index_reg(
+ const struct dc_context *ctx,
+ enum cgs_ind_reg addr_space,
+ uint32_t index,
+ uint32_t value)
+{
+ cgs_write_ind_register(ctx->cgs_device, addr_space, index, value);
+}
+
+static inline uint32_t get_reg_field_value_ex(
+ uint32_t reg_value,
+ uint32_t mask,
+ uint8_t shift)
+{
+ return (mask & reg_value) >> shift;
+}
+
+#define get_reg_field_value(reg_value, reg_name, reg_field)\
+ get_reg_field_value_ex(\
+ (reg_value),\
+ reg_name ## __ ## reg_field ## _MASK,\
+ reg_name ## __ ## reg_field ## __SHIFT)
+
+static inline uint32_t set_reg_field_value_ex(
+ uint32_t reg_value,
+ uint32_t value,
+ uint32_t mask,
+ uint8_t shift)
+{
+ ASSERT(mask != 0);
+ return (reg_value & ~mask) | (mask & (value << shift));
+}
+
+#define set_reg_field_value(reg_value, value, reg_name, reg_field)\
+ (reg_value) = set_reg_field_value_ex(\
+ (reg_value),\
+ (value),\
+ reg_name ## __ ## reg_field ## _MASK,\
+ reg_name ## __ ## reg_field ## __SHIFT)
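+/* Worked example with a made-up field occupying bits 8..15, i.e.
+ * mask 0x0000FF00 and shift 8:
+ *
+ *	get_reg_field_value_ex(0x1234ABCD, 0x0000FF00, 8) == 0xAB
+ *	set_reg_field_value_ex(0x1234ABCD, 0x55, 0x0000FF00, 8) == 0x123455CD
+ *
+ * The reg_name/reg_field macro wrappers above derive the same mask and
+ * shift values from the <reg>__<field>_MASK and <reg>__<field>__SHIFT
+ * register header definitions.
+ */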
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
+#define FD(reg_field) reg_field ## __SHIFT, \
+ reg_field ## _MASK
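+/* FD(X) expands to X__SHIFT, X_MASK, which matches the (shift, mask, value)
+ * triplets consumed by generic_reg_update_ex() above. Illustrative call
+ * with a hypothetical field name:
+ *
+ *	reg_val = generic_reg_update_ex(ctx, addr, reg_val, 1,
+ *			FD(OTG0_SOME_REG__SOME_FIELD), value);
+ */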
+
+/*
+ * return the number of polls before the condition is met
+ * return 0 if the condition is not met after the specified number of tries
+ */
+unsigned int generic_reg_wait(const struct dc_context *ctx,
+ uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value,
+ unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
+ const char *func_name, int line);
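+/* Illustrative call (addr, FIELD_MASK and FIELD_SHIFT are placeholders):
+ * poll every 10 us, up to 1000 times, until the field reads back as 1:
+ *
+ *	generic_reg_wait(ctx, addr, FIELD_MASK, FIELD_SHIFT, 1,
+ *			10, 1000, __func__, __LINE__);
+ */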
+
+
+/* These macros need to be used with soc15 registers in order to retrieve
+ * the actual offset.
+ */
+#define dm_write_reg_soc15(ctx, reg, inst_offset, value) \
+ dm_write_reg_func(ctx, reg + DCE_BASE.instance[0].segment[reg##_BASE_IDX] + inst_offset, value, __func__)
+
+#define dm_read_reg_soc15(ctx, reg, inst_offset) \
+ dm_read_reg_func(ctx, reg + DCE_BASE.instance[0].segment[reg##_BASE_IDX] + inst_offset, __func__)
+
+#define generic_reg_update_soc15(ctx, inst_offset, reg_name, n, ...)\
+ generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, \
+ dm_read_reg_func(ctx, mm##reg_name + DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + inst_offset, __func__), \
+ n, __VA_ARGS__)
+
+#define generic_reg_set_soc15(ctx, inst_offset, reg_name, n, ...)\
+ generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
+ n, __VA_ARGS__)
+
+#define get_reg_field_value_soc15(reg_value, block, reg_num, reg_name, reg_field)\
+ get_reg_field_value_ex(\
+ (reg_value),\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)
+
+#define set_reg_field_value_soc15(reg_value, value, block, reg_num, reg_name, reg_field)\
+ (reg_value) = set_reg_field_value_ex(\
+ (reg_value),\
+ (value),\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## _MASK,\
+ block ## reg_num ## _ ## reg_name ## __ ## reg_field ## __SHIFT)
+
+/**************************************
+ * Power Play (PP) interfaces
+ **************************************/
+
+/* DAL calls this function to notify PP about clocks it needs for the Mode Set.
+ * This is done *before* it changes DCE clock.
+ *
+ * If required clock is higher than current, then PP will increase the voltage.
+ *
+ * If required clock is lower than current, then PP will defer reduction of
+ * voltage until the call to dc_service_pp_post_dce_clock_change().
+ *
+ * \input - Contains clocks needed for Mode Set.
+ *
+ * \output - Contains clocks adjusted by PP which DAL should use for Mode Set.
+ * Valid only if the call succeeds (returns true).
+ *
+ * \returns true - call is successful
+ * false - call failed
+ */
+bool dm_pp_pre_dce_clock_change(
+ struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *requested_state,
+ struct dm_pp_gpu_clock_range *actual_state);
+
+/* The returned clock ranges are 'static' system clocks which will be used for
+ * mode validation purposes.
+ *
+ * \returns true - call is successful
+ * false - call failed
+ */
+bool dc_service_get_system_clocks_range(
+ const struct dc_context *ctx,
+ struct dm_pp_gpu_clock_range *sys_clks);
+
+/* Gets valid clock levels from pplib
+ *
+ * input: clk_type - display clk / sclk / mem clk
+ *
+ * output: array of valid clock levels for given type in ascending order,
+ * with invalid levels filtered out
+ *
+ */
+bool dm_pp_get_clock_levels_by_type(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels *clk_level_info);
+
+bool dm_pp_get_clock_levels_by_type_with_latency(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_latency *clk_level_info);
+
+bool dm_pp_get_clock_levels_by_type_with_voltage(
+ const struct dc_context *ctx,
+ enum dm_pp_clock_type clk_type,
+ struct dm_pp_clock_levels_with_voltage *clk_level_info);
+
+bool dm_pp_notify_wm_clock_changes(
+ const struct dc_context *ctx,
+ struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
+
+void dm_pp_get_funcs_rv(struct dc_context *ctx,
+ struct pp_smu_funcs_rv *funcs);
+
+/* DAL calls this function to notify PP about completion of Mode Set.
+ * For PP it means that current DCE clocks are those which were returned
+ * by dm_pp_pre_dce_clock_change(), in the 'output' parameter.
+ *
+ * If the clocks are higher than before, then PP does nothing.
+ *
+ * If the clocks are lower than before, then PP reduces the voltage.
+ *
+ * \returns true - call is successful
+ * false - call failed
+ */
+bool dm_pp_apply_display_requirements(
+ const struct dc_context *ctx,
+ const struct dm_pp_display_configuration *pp_display_cfg);
+
+bool dm_pp_apply_power_level_change_request(
+ const struct dc_context *ctx,
+ struct dm_pp_power_level_change_request *level_change_req);
+
+bool dm_pp_apply_clock_for_voltage_request(
+ const struct dc_context *ctx,
+ struct dm_pp_clock_for_voltage_req *clock_for_voltage_req);
+
+bool dm_pp_get_static_clocks(
+ const struct dc_context *ctx,
+ struct dm_pp_static_clock_info *static_clk_info);
+
+/****** end of PP interfaces ******/
+
+struct persistent_data_flag {
+ bool save_per_link;
+ bool save_per_edid;
+};
+
+/* Call to write data in registry editor for persistent data storage.
+ *
+ * \inputs sink - identify edid/link for registry folder creation
+ * module name - identify folders for registry
+ * key name - identify keys within folders for registry
+ * params - value to write in defined folder/key
+ * size - size of the input params
+ * flag - determine whether to save by link or edid
+ *
+ * \returns true - call is successful
+ * false - call failed
+ *
+ * sink module key
+ * -----------------------------------------------------------------------------
+ * NULL NULL NULL - failure
+ * NULL NULL - - create key with param value
+ * under base folder
+ * NULL - NULL - create module folder under base folder
+ * - NULL NULL - failure
+ * NULL - - - create key under module folder
+ * with no edid/link identification
+ * - NULL - - create key with param value
+ * under base folder
+ * - - NULL - create module folder under base folder
+ * - - - - create key under module folder
+ * with edid/link identification
+ */
+bool dm_write_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag);
+
+
+/* Call to read data in registry editor for persistent data storage.
+ *
+ * \inputs sink - identify edid/link for registry folder creation
+ * module name - identify folders for registry
+ * key name - identify keys within folders for registry
+ * size - size of the output params
+ * flag - determine whether it was save by link or edid
+ *
+ * \returns params - value read from defined folder/key
+ * true - call is successful
+ * false - call failed
+ *
+ * sink module key
+ * -----------------------------------------------------------------------------
+ * NULL NULL NULL - failure
+ * NULL NULL - - read key under base folder
+ * NULL - NULL - failure
+ * - NULL NULL - failure
+ * NULL - - - read key under module folder
+ * with no edid/link identification
+ * - NULL - - read key under base folder
+ * - - NULL - failure
+ * - - - - read key under module folder
+ * with edid/link identification
+ */
+bool dm_read_persistent_data(struct dc_context *ctx,
+ const struct dc_sink *sink,
+ const char *module_name,
+ const char *key_name,
+ void *params,
+ unsigned int size,
+ struct persistent_data_flag *flag);
+
+bool dm_query_extended_brightness_caps
+ (struct dc_context *ctx, enum dm_acpi_display_type display,
+ struct dm_acpi_atif_backlight_caps *pCaps);
+
+bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id);
+
+/*
+ *
+ * print-out services
+ *
+ */
+#define dm_log_to_buffer(buffer, size, fmt, args)\
+ vsnprintf(buffer, size, fmt, args)
+
+unsigned long long dm_get_timestamp(struct dc_context *ctx);
+
+/*
+ * Debug and verification hooks
+ */
+bool dm_helpers_dc_conn_log(
+ struct dc_context *ctx,
+ struct log_entry *entry,
+ enum dc_log_type event);
+
+void dm_dtn_log_begin(struct dc_context *ctx);
+void dm_dtn_log_append_v(struct dc_context *ctx, const char *msg, ...);
+void dm_dtn_log_end(struct dc_context *ctx);
+
+#endif /* __DM_SERVICES_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
new file mode 100644
index 000000000000..fa26cf488b3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DM_SERVICES_TYPES_H__
+#define __DM_SERVICES_TYPES_H__
+
+#include "os_types.h"
+#include "dc_types.h"
+
+#include "dm_pp_smu.h"
+
+struct dm_pp_clock_range {
+ int min_khz;
+ int max_khz;
+};
+
+enum dm_pp_clocks_state {
+ DM_PP_CLOCKS_STATE_INVALID,
+ DM_PP_CLOCKS_STATE_ULTRA_LOW,
+ DM_PP_CLOCKS_STATE_LOW,
+ DM_PP_CLOCKS_STATE_NOMINAL,
+ DM_PP_CLOCKS_STATE_PERFORMANCE,
+
+ /* Starting from DCE11, Max 8 levels of DPM state supported. */
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_INVALID = DM_PP_CLOCKS_STATE_INVALID,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_0,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_1,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_2,
+ /* to be backward compatible */
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_3,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_4,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_5,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_6,
+ DM_PP_CLOCKS_DPM_STATE_LEVEL_7,
+
+ DM_PP_CLOCKS_MAX_STATES
+};
+
+struct dm_pp_gpu_clock_range {
+ enum dm_pp_clocks_state clock_state;
+ struct dm_pp_clock_range sclk;
+ struct dm_pp_clock_range mclk;
+ struct dm_pp_clock_range eclk;
+ struct dm_pp_clock_range dclk;
+};
+
+enum dm_pp_clock_type {
+ DM_PP_CLOCK_TYPE_DISPLAY_CLK = 1,
+ DM_PP_CLOCK_TYPE_ENGINE_CLK, /* System clock */
+ DM_PP_CLOCK_TYPE_MEMORY_CLK,
+ DM_PP_CLOCK_TYPE_DCFCLK,
+ DM_PP_CLOCK_TYPE_DCEFCLK,
+ DM_PP_CLOCK_TYPE_SOCCLK,
+ DM_PP_CLOCK_TYPE_PIXELCLK,
+ DM_PP_CLOCK_TYPE_DISPLAYPHYCLK,
+ DM_PP_CLOCK_TYPE_DPPCLK,
+ DM_PP_CLOCK_TYPE_FCLK,
+};
+
+#define DC_DECODE_PP_CLOCK_TYPE(clk_type) \
+ (clk_type) == DM_PP_CLOCK_TYPE_DISPLAY_CLK ? "Display" : \
+ (clk_type) == DM_PP_CLOCK_TYPE_ENGINE_CLK ? "Engine" : \
+ (clk_type) == DM_PP_CLOCK_TYPE_MEMORY_CLK ? "Memory" : "Invalid"
+
+#define DM_PP_MAX_CLOCK_LEVELS 8
+
+struct dm_pp_clock_levels {
+ uint32_t num_levels;
+ uint32_t clocks_in_khz[DM_PP_MAX_CLOCK_LEVELS];
+};
+
+struct dm_pp_clock_with_latency {
+ uint32_t clocks_in_khz;
+ uint32_t latency_in_us;
+};
+
+struct dm_pp_clock_levels_with_latency {
+ uint32_t num_levels;
+ struct dm_pp_clock_with_latency data[DM_PP_MAX_CLOCK_LEVELS];
+};
+
+struct dm_pp_clock_with_voltage {
+ uint32_t clocks_in_khz;
+ uint32_t voltage_in_mv;
+};
+
+struct dm_pp_clock_levels_with_voltage {
+ uint32_t num_levels;
+ struct dm_pp_clock_with_voltage data[DM_PP_MAX_CLOCK_LEVELS];
+};
+
+struct dm_pp_single_disp_config {
+ enum signal_type signal;
+ uint8_t transmitter;
+ uint8_t ddi_channel_mapping;
+ uint8_t pipe_idx;
+ uint32_t src_height;
+ uint32_t src_width;
+ uint32_t v_refresh;
+ uint32_t sym_clock; /* HDMI only */
+ struct dc_link_settings link_settings; /* DP only */
+};
+
+#define MAX_WM_SETS 4
+
+enum dm_pp_wm_set_id {
+ WM_SET_A = 0,
+ WM_SET_B,
+ WM_SET_C,
+ WM_SET_D,
+ WM_SET_INVALID = 0xffff,
+};
+
+struct dm_pp_clock_range_for_wm_set {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_eng_clk_in_khz;
+ uint32_t wm_max_eng_clk_in_khz;
+ uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+};
+
+struct dm_pp_wm_sets_with_clock_ranges {
+ uint32_t num_wm_sets;
+ struct dm_pp_clock_range_for_wm_set wm_clk_ranges[MAX_WM_SETS];
+};
+
+struct dm_pp_clock_range_for_dmif_wm_set_soc15 {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_dcfclk_clk_in_khz;
+ uint32_t wm_max_dcfclk_clk_in_khz;
+ uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+};
+
+struct dm_pp_clock_range_for_mcif_wm_set_soc15 {
+ enum dm_pp_wm_set_id wm_set_id;
+ uint32_t wm_min_socclk_clk_in_khz;
+ uint32_t wm_max_socclk_clk_in_khz;
+ uint32_t wm_min_memg_clk_in_khz;
+ uint32_t wm_max_mem_clk_in_khz;
+};
+
+struct dm_pp_wm_sets_with_clock_ranges_soc15 {
+ uint32_t num_wm_dmif_sets;
+ uint32_t num_wm_mcif_sets;
+ struct dm_pp_clock_range_for_dmif_wm_set_soc15
+ wm_dmif_clocks_ranges[MAX_WM_SETS];
+ struct dm_pp_clock_range_for_mcif_wm_set_soc15
+ wm_mcif_clocks_ranges[MAX_WM_SETS];
+};
+
+#define MAX_DISPLAY_CONFIGS 6
+
+struct dm_pp_display_configuration {
+ bool nb_pstate_switch_disable;/* controls NB PState switch */
+ bool cpu_cc6_disable; /* controls CPU CState switch (on or off) */
+ bool cpu_pstate_disable;
+ uint32_t cpu_pstate_separation_time;
+
+ uint32_t min_memory_clock_khz;
+ uint32_t min_engine_clock_khz;
+ uint32_t min_engine_clock_deep_sleep_khz;
+
+ uint32_t avail_mclk_switch_time_us;
+ uint32_t avail_mclk_switch_time_in_disp_active_us;
+ uint32_t min_dcfclock_khz;
+ uint32_t min_dcfc_deep_sleep_clock_khz;
+
+ uint32_t disp_clk_khz;
+
+ bool all_displays_in_sync;
+
+ uint8_t display_count;
+ struct dm_pp_single_disp_config disp_configs[MAX_DISPLAY_CONFIGS];
+
+ /* Controller Index of primary display - used in MCLK SMC switching hang
+ * SW Workaround */
+ uint8_t crtc_index;
+ /* htotal*1000/pixelclk - used in MCLK SMC switching hang SW Workaround */
+ uint32_t line_time_in_us;
+};
+
+struct dm_bl_data_point {
+ /* Brightness level in percentage */
+ uint8_t luminance;
+ /* Brightness level as effective value in range 0-255,
+ * corresponding to above percentage
+ */
+ uint8_t signalLevel;
+};
+
+/* Total size of the structure should not exceed 256 bytes */
+struct dm_acpi_atif_backlight_caps {
+ uint16_t size; /* Bytes 0-1 (2 bytes) */
+ uint16_t flags; /* Bytes 2-3 (2 bytes) */
+ uint8_t errorCode; /* Byte 4 */
+ uint8_t acLevelPercentage; /* Byte 5 */
+ uint8_t dcLevelPercentage; /* Byte 6 */
+ uint8_t minInputSignal; /* Byte 7 */
+ uint8_t maxInputSignal; /* Byte 8 */
+ uint8_t numOfDataPoints; /* Byte 9 */
+ struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/
+};
+
+enum dm_acpi_display_type {
+ AcpiDisplayType_LCD1 = 0,
+ AcpiDisplayType_CRT1 = 1,
+ AcpiDisplayType_DFP1 = 3,
+ AcpiDisplayType_CRT2 = 4,
+ AcpiDisplayType_LCD2 = 5,
+ AcpiDisplayType_DFP2 = 7,
+ AcpiDisplayType_DFP3 = 9,
+ AcpiDisplayType_DFP4 = 10,
+ AcpiDisplayType_DFP5 = 11,
+ AcpiDisplayType_DFP6 = 12
+};
+
+enum dm_pp_power_level {
+ DM_PP_POWER_LEVEL_INVALID,
+ DM_PP_POWER_LEVEL_ULTRA_LOW,
+ DM_PP_POWER_LEVEL_LOW,
+ DM_PP_POWER_LEVEL_NOMINAL,
+ DM_PP_POWER_LEVEL_PERFORMANCE,
+
+ DM_PP_POWER_LEVEL_0 = DM_PP_POWER_LEVEL_ULTRA_LOW,
+ DM_PP_POWER_LEVEL_1 = DM_PP_POWER_LEVEL_LOW,
+ DM_PP_POWER_LEVEL_2 = DM_PP_POWER_LEVEL_NOMINAL,
+ DM_PP_POWER_LEVEL_3 = DM_PP_POWER_LEVEL_PERFORMANCE,
+ DM_PP_POWER_LEVEL_4 = DM_PP_CLOCKS_DPM_STATE_LEVEL_3 + 1,
+ DM_PP_POWER_LEVEL_5 = DM_PP_CLOCKS_DPM_STATE_LEVEL_4 + 1,
+ DM_PP_POWER_LEVEL_6 = DM_PP_CLOCKS_DPM_STATE_LEVEL_5 + 1,
+ DM_PP_POWER_LEVEL_7 = DM_PP_CLOCKS_DPM_STATE_LEVEL_6 + 1,
+};
+
+struct dm_pp_power_level_change_request {
+ enum dm_pp_power_level power_level;
+};
+
+struct dm_pp_clock_for_voltage_req {
+ enum dm_pp_clock_type clk_type;
+ uint32_t clocks_in_khz;
+};
+
+struct dm_pp_static_clock_info {
+ uint32_t max_sclk_khz;
+ uint32_t max_mclk_khz;
+
+ /* max possible display block clocks state */
+ enum dm_pp_clocks_state max_clocks_state;
+};
+
+struct dtn_min_clk_info {
+ uint32_t disp_clk_khz;
+ uint32_t min_engine_clock_khz;
+ uint32_t min_memory_clock_khz;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
new file mode 100644
index 000000000000..87bab8e8139f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the 'dml' sub-component of DAL.
+# It provides the Display Mode Library calculations
+# required by other DAL subcomponents.
+
+CFLAGS_display_mode_vba.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_mode_lib.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_pipe_clocks.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dml1_display_rq_dlg_calc.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_display_rq_dlg_helpers.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_soc_bounding_box.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_dml_common_defs.o := -mhard-float -msse -mpreferred-stack-boundary=4
+
+
+DML = display_mode_lib.o display_rq_dlg_calc.o \
+ display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
+ soc_bounding_box.o dml_common_defs.o display_mode_vba.o
+
+AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
new file mode 100644
index 000000000000..ea4cde952f4f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
@@ -0,0 +1,559 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DC_FEATURES_H__
+#define __DC_FEATURES_H__
+
+// local features
+#define DC__PRESENT 1
+#define DC__PRESENT__1 1
+#define DC__NUM_DPP 4
+#define DC__VOLTAGE_STATES 7
+#define DC__NUM_DPP__4 1
+#define DC__NUM_DPP__0_PRESENT 1
+#define DC__NUM_DPP__1_PRESENT 1
+#define DC__NUM_DPP__2_PRESENT 1
+#define DC__NUM_DPP__3_PRESENT 1
+#define DC__NUM_DPP__MAX 8
+#define DC__NUM_DPP__MAX__8 1
+#define DC__PIPE_10BIT 0
+#define DC__PIPE_10BIT__0 1
+#define DC__PIPE_10BIT__MAX 1
+#define DC__PIPE_10BIT__MAX__1 1
+#define DC__NUM_OPP 4
+#define DC__NUM_OPP__4 1
+#define DC__NUM_OPP__0_PRESENT 1
+#define DC__NUM_OPP__1_PRESENT 1
+#define DC__NUM_OPP__2_PRESENT 1
+#define DC__NUM_OPP__3_PRESENT 1
+#define DC__NUM_OPP__MAX 6
+#define DC__NUM_OPP__MAX__6 1
+#define DC__NUM_DSC 0
+#define DC__NUM_DSC__0 1
+#define DC__NUM_DSC__MAX 6
+#define DC__NUM_DSC__MAX__6 1
+#define DC__NUM_ABM 1
+#define DC__NUM_ABM__1 1
+#define DC__NUM_ABM__0_PRESENT 1
+#define DC__NUM_ABM__MAX 2
+#define DC__NUM_ABM__MAX__2 1
+#define DC__ODM_PRESENT 0
+#define DC__ODM_PRESENT__0 1
+#define DC__NUM_OTG 4
+#define DC__NUM_OTG__4 1
+#define DC__NUM_OTG__0_PRESENT 1
+#define DC__NUM_OTG__1_PRESENT 1
+#define DC__NUM_OTG__2_PRESENT 1
+#define DC__NUM_OTG__3_PRESENT 1
+#define DC__NUM_OTG__MAX 6
+#define DC__NUM_OTG__MAX__6 1
+#define DC__NUM_DWB 2
+#define DC__NUM_DWB__2 1
+#define DC__NUM_DWB__0_PRESENT 1
+#define DC__NUM_DWB__1_PRESENT 1
+#define DC__NUM_DWB__MAX 2
+#define DC__NUM_DWB__MAX__2 1
+#define DC__NUM_DIG 4
+#define DC__NUM_DIG__4 1
+#define DC__NUM_DIG__0_PRESENT 1
+#define DC__NUM_DIG__1_PRESENT 1
+#define DC__NUM_DIG__2_PRESENT 1
+#define DC__NUM_DIG__3_PRESENT 1
+#define DC__NUM_DIG__MAX 6
+#define DC__NUM_DIG__MAX__6 1
+#define DC__NUM_AUX 4
+#define DC__NUM_AUX__4 1
+#define DC__NUM_AUX__0_PRESENT 1
+#define DC__NUM_AUX__1_PRESENT 1
+#define DC__NUM_AUX__2_PRESENT 1
+#define DC__NUM_AUX__3_PRESENT 1
+#define DC__NUM_AUX__MAX 6
+#define DC__NUM_AUX__MAX__6 1
+#define DC__NUM_AUDIO_STREAMS 4
+#define DC__NUM_AUDIO_STREAMS__4 1
+#define DC__NUM_AUDIO_STREAMS__0_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__1_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__2_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__3_PRESENT 1
+#define DC__NUM_AUDIO_STREAMS__MAX 8
+#define DC__NUM_AUDIO_STREAMS__MAX__8 1
+#define DC__NUM_AUDIO_ENDPOINTS 6
+#define DC__NUM_AUDIO_ENDPOINTS__6 1
+#define DC__NUM_AUDIO_ENDPOINTS__0_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__1_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__2_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__3_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__4_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__5_PRESENT 1
+#define DC__NUM_AUDIO_ENDPOINTS__MAX 8
+#define DC__NUM_AUDIO_ENDPOINTS__MAX__8 1
+#define DC__NUM_AUDIO_INPUT_STREAMS 0
+#define DC__NUM_AUDIO_INPUT_STREAMS__0 1
+#define DC__NUM_AUDIO_INPUT_STREAMS__MAX 8
+#define DC__NUM_AUDIO_INPUT_STREAMS__MAX__8 1
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS 0
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS__0 1
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS__MAX 8
+#define DC__NUM_AUDIO_INPUT_ENDPOINTS__MAX__8 1
+#define DC__NUM_CURSOR 1
+#define DC__NUM_CURSOR__1 1
+#define DC__NUM_CURSOR__0_PRESENT 1
+#define DC__NUM_CURSOR__MAX 2
+#define DC__NUM_CURSOR__MAX__2 1
+#define DC__DIGITAL_BYPASS_PRESENT 0
+#define DC__DIGITAL_BYPASS_PRESENT__0 1
+#define DC__HCID_HWMAJVER 1
+#define DC__HCID_HWMAJVER__1 1
+#define DC__HCID_HWMINVER 0
+#define DC__HCID_HWMINVER__0 1
+#define DC__HCID_HWREV 0
+#define DC__HCID_HWREV__0 1
+#define DC__ROMSTRAP_PRESENT 0
+#define DC__ROMSTRAP_PRESENT__0 1
+#define DC__NUM_RBBMIF_DECODES 30
+#define DC__NUM_RBBMIF_DECODES__30 1
+#define DC__NUM_DBG_REGS 36
+#define DC__NUM_DBG_REGS__36 1
+#define DC__NUM_PIPES_UNDERLAY 0
+#define DC__NUM_PIPES_UNDERLAY__0 1
+#define DC__NUM_PIPES_UNDERLAY__MAX 2
+#define DC__NUM_PIPES_UNDERLAY__MAX__2 1
+#define DC__NUM_VCE_ENGINE 1
+#define DC__NUM_VCE_ENGINE__1 1
+#define DC__NUM_VCE_ENGINE__0_PRESENT 1
+#define DC__NUM_VCE_ENGINE__MAX 2
+#define DC__NUM_VCE_ENGINE__MAX__2 1
+#define DC__OTG_EXTERNAL_SYNC_PRESENT 0
+#define DC__OTG_EXTERNAL_SYNC_PRESENT__0 1
+#define DC__OTG_CRC_PRESENT 1
+#define DC__OTG_CRC_PRESENT__1 1
+#define DC__VIP_PRESENT 0
+#define DC__VIP_PRESENT__0 1
+#define DC__DTMTEST_PRESENT 0
+#define DC__DTMTEST_PRESENT__0 1
+#define DC__POWER_GATE_PRESENT 1
+#define DC__POWER_GATE_PRESENT__1 1
+#define DC__MEM_PG 1
+#define DC__MEM_PG__1 1
+#define DC__FMT_SRC_SEL_PRESENT 0
+#define DC__FMT_SRC_SEL_PRESENT__0 1
+#define DC__DIG_FEATURES__HDMI_PRESENT 1
+#define DC__DIG_FEATURES__HDMI_PRESENT__1 1
+#define DC__DIG_FEATURES__DP_PRESENT 1
+#define DC__DIG_FEATURES__DP_PRESENT__1 1
+#define DC__DIG_FEATURES__DP_MST_PRESENT 1
+#define DC__DIG_FEATURES__DP_MST_PRESENT__1 1
+#define DC__DIG_LP_FEATURES__HDMI_PRESENT 0
+#define DC__DIG_LP_FEATURES__HDMI_PRESENT__0 1
+#define DC__DIG_LP_FEATURES__DP_PRESENT 1
+#define DC__DIG_LP_FEATURES__DP_PRESENT__1 1
+#define DC__DIG_LP_FEATURES__DP_MST_PRESENT 0
+#define DC__DIG_LP_FEATURES__DP_MST_PRESENT__0 1
+#define DC__DIG_RESYNC_FIFO_SIZE 14
+#define DC__DIG_RESYNC_FIFO_SIZE__14 1
+#define DC__DIG_RESYNC_FIFO_SIZE__0_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__1_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__2_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__3_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__4_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__5_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__6_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__7_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__8_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__9_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__10_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__11_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__12_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__13_PRESENT 1
+#define DC__DIG_RESYNC_FIFO_SIZE__MAX 16
+#define DC__DIG_RESYNC_FIFO_SIZE__MAX__16 1
+#define DC__DAC_RESYNC_FIFO_SIZE 12
+#define DC__DAC_RESYNC_FIFO_SIZE__12 1
+#define DC__DAC_RESYNC_FIFO_SIZE__0_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__1_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__2_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__3_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__4_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__5_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__6_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__7_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__8_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__9_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__10_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__11_PRESENT 1
+#define DC__DAC_RESYNC_FIFO_SIZE__MAX 16
+#define DC__DAC_RESYNC_FIFO_SIZE__MAX__16 1
+#define DC__DVO_RESYNC_FIFO_SIZE 12
+#define DC__DVO_RESYNC_FIFO_SIZE__12 1
+#define DC__DVO_RESYNC_FIFO_SIZE__0_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__1_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__2_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__3_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__4_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__5_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__6_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__7_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__8_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__9_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__10_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__11_PRESENT 1
+#define DC__DVO_RESYNC_FIFO_SIZE__MAX 16
+#define DC__DVO_RESYNC_FIFO_SIZE__MAX__16 1
+#define DC__MEM_CDC_PRESENT 1
+#define DC__MEM_CDC_PRESENT__1 1
+#define DC__NUM_HPD 4
+#define DC__NUM_HPD__4 1
+#define DC__NUM_HPD__0_PRESENT 1
+#define DC__NUM_HPD__1_PRESENT 1
+#define DC__NUM_HPD__2_PRESENT 1
+#define DC__NUM_HPD__3_PRESENT 1
+#define DC__NUM_HPD__MAX 6
+#define DC__NUM_HPD__MAX__6 1
+#define DC__NUM_DDC_PAIRS 4
+#define DC__NUM_DDC_PAIRS__4 1
+#define DC__NUM_DDC_PAIRS__0_PRESENT 1
+#define DC__NUM_DDC_PAIRS__1_PRESENT 1
+#define DC__NUM_DDC_PAIRS__2_PRESENT 1
+#define DC__NUM_DDC_PAIRS__3_PRESENT 1
+#define DC__NUM_DDC_PAIRS__MAX 6
+#define DC__NUM_DDC_PAIRS__MAX__6 1
+#define DC__NUM_AUDIO_PLL 0
+#define DC__NUM_AUDIO_PLL__0 1
+#define DC__NUM_AUDIO_PLL__MAX 2
+#define DC__NUM_AUDIO_PLL__MAX__2 1
+#define DC__NUM_PIXEL_PLL 1
+#define DC__NUM_PIXEL_PLL__1 1
+#define DC__NUM_PIXEL_PLL__0_PRESENT 1
+#define DC__NUM_PIXEL_PLL__MAX 4
+#define DC__NUM_PIXEL_PLL__MAX__4 1
+#define DC__NUM_CASCADED_PLL 0
+#define DC__NUM_CASCADED_PLL__0 1
+#define DC__NUM_CASCADED_PLL__MAX 3
+#define DC__NUM_CASCADED_PLL__MAX__3 1
+#define DC__PIXCLK_FROM_PHYPLL 1
+#define DC__PIXCLK_FROM_PHYPLL__1 1
+#define DC__NB_STUTTER_MODE_PRESENT 0
+#define DC__NB_STUTTER_MODE_PRESENT__0 1
+#define DC__I2S0_AND_SPDIF0_PRESENT 0
+#define DC__I2S0_AND_SPDIF0_PRESENT__0 1
+#define DC__I2S1_PRESENT 0
+#define DC__I2S1_PRESENT__0 1
+#define DC__SPDIF1_PRESENT 0
+#define DC__SPDIF1_PRESENT__0 1
+#define DC__DSI_PRESENT 0
+#define DC__DSI_PRESENT__0 1
+#define DC__DACA_PRESENT 0
+#define DC__DACA_PRESENT__0 1
+#define DC__DACB_PRESENT 0
+#define DC__DACB_PRESENT__0 1
+#define DC__NUM_PIPES 4
+#define DC__NUM_PIPES__4 1
+#define DC__NUM_PIPES__0_PRESENT 1
+#define DC__NUM_PIPES__1_PRESENT 1
+#define DC__NUM_PIPES__2_PRESENT 1
+#define DC__NUM_PIPES__3_PRESENT 1
+#define DC__NUM_PIPES__MAX 6
+#define DC__NUM_PIPES__MAX__6 1
+#define DC__NUM_DIG_LP 0
+#define DC__NUM_DIG_LP__0 1
+#define DC__NUM_DIG_LP__MAX 2
+#define DC__NUM_DIG_LP__MAX__2 1
+#define DC__DPDEBUG_PRESENT 0
+#define DC__DPDEBUG_PRESENT__0 1
+#define DC__DISPLAY_WB_PRESENT 1
+#define DC__DISPLAY_WB_PRESENT__1 1
+#define DC__NUM_CWB 0
+#define DC__NUM_CWB__0 1
+#define DC__NUM_CWB__MAX 2
+#define DC__NUM_CWB__MAX__2 1
+#define DC__MVP_PRESENT 0
+#define DC__MVP_PRESENT__0 1
+#define DC__DVO_PRESENT 0
+#define DC__DVO_PRESENT__0 1
+#define DC__ABM_PRESENT 0
+#define DC__ABM_PRESENT__0 1
+#define DC__BPHYC_PLL_PRESENT 0
+#define DC__BPHYC_PLL_PRESENT__0 1
+#define DC__BPHYC_UNIPHY_PRESENT 0
+#define DC__BPHYC_UNIPHY_PRESENT__0 1
+#define DC__PHY_BROADCAST_PRESENT 0
+#define DC__PHY_BROADCAST_PRESENT__0 1
+#define DC__NUM_OF_DCRX_SD 0
+#define DC__NUM_OF_DCRX_SD__0 1
+#define DC__DVO_17BIT_MAPPING 0
+#define DC__DVO_17BIT_MAPPING__0 1
+#define DC__AVSYNC_PRESENT 0
+#define DC__AVSYNC_PRESENT__0 1
+#define DC__NUM_OF_DCRX_PORTS 0
+#define DC__NUM_OF_DCRX_PORTS__0 1
+#define DC__NUM_OF_DCRX_PORTS__MAX 1
+#define DC__NUM_OF_DCRX_PORTS__MAX__1 1
+#define DC__NUM_PHY 4
+#define DC__NUM_PHY__4 1
+#define DC__NUM_PHY__0_PRESENT 1
+#define DC__NUM_PHY__1_PRESENT 1
+#define DC__NUM_PHY__2_PRESENT 1
+#define DC__NUM_PHY__3_PRESENT 1
+#define DC__NUM_PHY__MAX 7
+#define DC__NUM_PHY__MAX__7 1
+#define DC__NUM_PHY_LP 0
+#define DC__NUM_PHY_LP__0 1
+#define DC__NUM_PHY_LP__MAX 2
+#define DC__NUM_PHY_LP__MAX__2 1
+#define DC__SYNC_CELL vid_sync_gf14lpp
+#define DC__SYNC_CELL__VID_SYNC_GF14LPP 1
+#define DC__USE_NEW_VSS 1
+#define DC__USE_NEW_VSS__1 1
+#define DC__SYNC_CELL_DISPCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DISPCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DVOCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DVOCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_PIXCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_PIXCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_SYMCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_SYMCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DPPCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DPPCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DPREFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DPREFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_REFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_REFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_PCIE_REFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_PCIE_REFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_MVPCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_MVPCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_SCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_SCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DCEFCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DCEFCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_AMCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_AMCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DSICLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DSICLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_BYTECLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_BYTECLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_ESCCLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_ESCCLK_NUM_LATCHES__6 1
+#define DC__SYNC_CELL_DB_CLK_NUM_LATCHES 6
+#define DC__SYNC_CELL_DB_CLK_NUM_LATCHES__6 1
+#define UNIPHYA_PRESENT 1
+#define UNIPHYA_PRESENT__1 1
+#define DC__UNIPHYA_PRESENT 1
+#define DC__UNIPHYA_PRESENT__1 1
+#define UNIPHYB_PRESENT 1
+#define UNIPHYB_PRESENT__1 1
+#define DC__UNIPHYB_PRESENT 1
+#define DC__UNIPHYB_PRESENT__1 1
+#define UNIPHYC_PRESENT 1
+#define UNIPHYC_PRESENT__1 1
+#define DC__UNIPHYC_PRESENT 1
+#define DC__UNIPHYC_PRESENT__1 1
+#define UNIPHYD_PRESENT 1
+#define UNIPHYD_PRESENT__1 1
+#define DC__UNIPHYD_PRESENT 1
+#define DC__UNIPHYD_PRESENT__1 1
+#define UNIPHYE_PRESENT 0
+#define UNIPHYE_PRESENT__0 1
+#define DC__UNIPHYE_PRESENT 0
+#define DC__UNIPHYE_PRESENT__0 1
+#define UNIPHYF_PRESENT 0
+#define UNIPHYF_PRESENT__0 1
+#define DC__UNIPHYF_PRESENT 0
+#define DC__UNIPHYF_PRESENT__0 1
+#define UNIPHYG_PRESENT 0
+#define UNIPHYG_PRESENT__0 1
+#define DC__UNIPHYG_PRESENT 0
+#define DC__UNIPHYG_PRESENT__0 1
+#define DC__TMDS_LINK tmds_link_dual
+#define DC__TMDS_LINK__TMDS_LINK_DUAL 1
+#define DC__WBSCL_PIXBW 8
+#define DC__WBSCL_PIXBW__8 1
+#define DC__DWB_CSC_PRESENT 0
+#define DC__DWB_CSC_PRESENT__0 1
+#define DC__DWB_LUMA_SCL_PRESENT 0
+#define DC__DWB_LUMA_SCL_PRESENT__0 1
+#define DC__DENTIST_INTERFACE_PRESENT 1
+#define DC__DENTIST_INTERFACE_PRESENT__1 1
+#define DC__GENERICA_PRESENT 1
+#define DC__GENERICA_PRESENT__1 1
+#define DC__GENERICB_PRESENT 1
+#define DC__GENERICB_PRESENT__1 1
+#define DC__GENERICC_PRESENT 0
+#define DC__GENERICC_PRESENT__0 1
+#define DC__GENERICD_PRESENT 0
+#define DC__GENERICD_PRESENT__0 1
+#define DC__GENERICE_PRESENT 0
+#define DC__GENERICE_PRESENT__0 1
+#define DC__GENERICF_PRESENT 0
+#define DC__GENERICF_PRESENT__0 1
+#define DC__GENERICG_PRESENT 0
+#define DC__GENERICG_PRESENT__0 1
+#define DC__UNIPHY_VOLTAGE_MODE 1
+#define DC__UNIPHY_VOLTAGE_MODE__1 1
+#define DC__BLON_TYPE dedicated
+#define DC__BLON_TYPE__DEDICATED 1
+#define DC__UNIPHY_STAGGER_CH_PRESENT 1
+#define DC__UNIPHY_STAGGER_CH_PRESENT__1 1
+#define DC__XDMA_PRESENT 0
+#define DC__XDMA_PRESENT__0 1
+#define XDMA__PRESENT 0
+#define XDMA__PRESENT__0 1
+#define DC__DP_MEM_PG 0
+#define DC__DP_MEM_PG__0 1
+#define DP__MEM_PG 0
+#define DP__MEM_PG__0 1
+#define DC__AFMT_MEM_PG 0
+#define DC__AFMT_MEM_PG__0 1
+#define AFMT__MEM_PG 0
+#define AFMT__MEM_PG__0 1
+#define DC__HDMI_MEM_PG 0
+#define DC__HDMI_MEM_PG__0 1
+#define HDMI__MEM_PG 0
+#define HDMI__MEM_PG__0 1
+#define DC__I2C_MEM_PG 0
+#define DC__I2C_MEM_PG__0 1
+#define I2C__MEM_PG 0
+#define I2C__MEM_PG__0 1
+#define DC__DSCL_MEM_PG 0
+#define DC__DSCL_MEM_PG__0 1
+#define DSCL__MEM_PG 0
+#define DSCL__MEM_PG__0 1
+#define DC__CM_MEM_PG 0
+#define DC__CM_MEM_PG__0 1
+#define CM__MEM_PG 0
+#define CM__MEM_PG__0 1
+#define DC__OBUF_MEM_PG 0
+#define DC__OBUF_MEM_PG__0 1
+#define OBUF__MEM_PG 0
+#define OBUF__MEM_PG__0 1
+#define DC__WBIF_MEM_PG 1
+#define DC__WBIF_MEM_PG__1 1
+#define WBIF__MEM_PG 1
+#define WBIF__MEM_PG__1 1
+#define DC__VGA_MEM_PG 0
+#define DC__VGA_MEM_PG__0 1
+#define VGA__MEM_PG 0
+#define VGA__MEM_PG__0 1
+#define DC__FMT_MEM_PG 0
+#define DC__FMT_MEM_PG__0 1
+#define FMT__MEM_PG 0
+#define FMT__MEM_PG__0 1
+#define DC__ODM_MEM_PG 0
+#define DC__ODM_MEM_PG__0 1
+#define ODM__MEM_PG 0
+#define ODM__MEM_PG__0 1
+#define DC__DSI_MEM_PG 0
+#define DC__DSI_MEM_PG__0 1
+#define DSI__MEM_PG 0
+#define DSI__MEM_PG__0 1
+#define DC__AZ_MEM_PG 1
+#define DC__AZ_MEM_PG__1 1
+#define AZ__MEM_PG 1
+#define AZ__MEM_PG__1 1
+#define DC__WBSCL_MEM1P1024X64QS_MEM_PG 1
+#define DC__WBSCL_MEM1P1024X64QS_MEM_PG__1 1
+#define WBSCL_MEM1P1024X64QS__MEM_PG 1
+#define WBSCL_MEM1P1024X64QS__MEM_PG__1 1
+#define DC__WBSCL_MEM1P528X64QS_MEM_PG 1
+#define DC__WBSCL_MEM1P528X64QS_MEM_PG__1 1
+#define WBSCL_MEM1P528X64QS__MEM_PG 1
+#define WBSCL_MEM1P528X64QS__MEM_PG__1 1
+#define DC__DMCU_MEM1P1024X32BQS_MEM_PG 1
+#define DC__DMCU_MEM1P1024X32BQS_MEM_PG__1 1
+#define DMCU_MEM1P1024X32BQS__MEM_PG 1
+#define DMCU_MEM1P1024X32BQS__MEM_PG__1 1
+#define DC__HUBBUB_SDP_TAG_INT_MEM_PG 0
+#define DC__HUBBUB_SDP_TAG_INT_MEM_PG__0 1
+#define HUBBUB_SDP_TAG_INT__MEM_PG 0
+#define HUBBUB_SDP_TAG_INT__MEM_PG__0 1
+#define DC__HUBBUB_SDP_TAG_EXT_MEM_PG 0
+#define DC__HUBBUB_SDP_TAG_EXT_MEM_PG__0 1
+#define HUBBUB_SDP_TAG_EXT__MEM_PG 0
+#define HUBBUB_SDP_TAG_EXT__MEM_PG__0 1
+#define DC__HUBBUB_RET_ZERO_MEM_PG 0
+#define DC__HUBBUB_RET_ZERO_MEM_PG__0 1
+#define HUBBUB_RET_ZERO__MEM_PG 0
+#define HUBBUB_RET_ZERO__MEM_PG__0 1
+#define DC__HUBBUB_RET_ROB_MEM_PG 0
+#define DC__HUBBUB_RET_ROB_MEM_PG__0 1
+#define HUBBUB_RET_ROB__MEM_PG 0
+#define HUBBUB_RET_ROB__MEM_PG__0 1
+#define DC__HUBPRET_CUR_ROB_MEM_PG 0
+#define DC__HUBPRET_CUR_ROB_MEM_PG__0 1
+#define HUBPRET_CUR_ROB__MEM_PG 0
+#define HUBPRET_CUR_ROB__MEM_PG__0 1
+#define DC__HUBPRET_CUR_CDC_MEM_PG 0
+#define DC__HUBPRET_CUR_CDC_MEM_PG__0 1
+#define HUBPRET_CUR_CDC__MEM_PG 0
+#define HUBPRET_CUR_CDC__MEM_PG__0 1
+#define DC__HUBPREQ_MPTE_MEM_PG 0
+#define DC__HUBPREQ_MPTE_MEM_PG__0 1
+#define HUBPREQ_MPTE__MEM_PG 0
+#define HUBPREQ_MPTE__MEM_PG__0 1
+#define DC__HUBPREQ_META_MEM_PG 0
+#define DC__HUBPREQ_META_MEM_PG__0 1
+#define HUBPREQ_META__MEM_PG 0
+#define HUBPREQ_META__MEM_PG__0 1
+#define DC__HUBPREQ_DPTE_MEM_PG 0
+#define DC__HUBPREQ_DPTE_MEM_PG__0 1
+#define HUBPREQ_DPTE__MEM_PG 0
+#define HUBPREQ_DPTE__MEM_PG__0 1
+#define DC__HUBPRET_DET_MEM_PG 0
+#define DC__HUBPRET_DET_MEM_PG__0 1
+#define HUBPRET_DET__MEM_PG 0
+#define HUBPRET_DET__MEM_PG__0 1
+#define DC__HUBPRET_PIX_CDC_MEM_PG 0
+#define DC__HUBPRET_PIX_CDC_MEM_PG__0 1
+#define HUBPRET_PIX_CDC__MEM_PG 0
+#define HUBPRET_PIX_CDC__MEM_PG__0 1
+#define DC__TOP_BLKS__DCCG 1
+#define DC__TOP_BLKS__DCHUBBUB 1
+#define DC__TOP_BLKS__DCHUBP 1
+#define DC__TOP_BLKS__HDA 1
+#define DC__TOP_BLKS__DIO 1
+#define DC__TOP_BLKS__DCIO 1
+#define DC__TOP_BLKS__DMU 1
+#define DC__TOP_BLKS__DPP 1
+#define DC__TOP_BLKS__MPC 1
+#define DC__TOP_BLKS__OPP 1
+#define DC__TOP_BLKS__OPTC 1
+#define DC__TOP_BLKS__MMHUBBUB 1
+#define DC__TOP_BLKS__WB 1
+#define DC__TOP_BLKS__MAX 13
+#define DC__TOP_BLKS__MAX__13 1
+#define DC__DCHUBP_DPP_SF_PIXEL_CREDITS 9
+#define DC__DCHUBP_DPP_SF_PIXEL_CREDITS__9 1
+#define DC__DPP_MPC_SF_PIXEL_CREDITS 9
+#define DC__DPP_MPC_SF_PIXEL_CREDITS__9 1
+#define DC__MPC_OPP_SF_PIXEL_CREDITS 8
+#define DC__MPC_OPP_SF_PIXEL_CREDITS__8 1
+#define DC__OPP_OPTC_SF_PIXEL_CREDITS 8
+#define DC__OPP_OPTC_SF_PIXEL_CREDITS__8 1
+#define DC__SFR_SFT_ROUND_TRIP_DELAY 5
+#define DC__SFR_SFT_ROUND_TRIP_DELAY__5 1
+#define DC__REPEATER_PROJECT_MAX 8
+#define DC__REPEATER_PROJECT_MAX__8 1
+#define DC__SURFACE_422_CAPABLE 0
+#define DC__SURFACE_422_CAPABLE__0 1
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
new file mode 100644
index 000000000000..b1ad3553f900
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DISPLAY_MODE_ENUMS_H__
+#define __DISPLAY_MODE_ENUMS_H__
+
+enum output_encoder_class {
+ dm_dp = 0, dm_hdmi = 1, dm_wb = 2, dm_edp
+};
+enum output_format_class {
+ dm_444 = 0, dm_420 = 1, dm_n422, dm_s422
+};
+enum source_format_class {
+ dm_444_16 = 0,
+ dm_444_32 = 1,
+ dm_444_64 = 2,
+ dm_420_8 = 3,
+ dm_420_10 = 4,
+ dm_422_8 = 5,
+ dm_422_10 = 6,
+ dm_444_8 = 7,
+ dm_mono_8,
+ dm_mono_16
+};
+enum output_bpc_class {
+ dm_out_6 = 0, dm_out_8 = 1, dm_out_10 = 2, dm_out_12 = 3, dm_out_16 = 4
+};
+enum scan_direction_class {
+ dm_horz = 0, dm_vert = 1
+};
+enum dm_swizzle_mode {
+ dm_sw_linear = 0,
+ dm_sw_256b_s = 1,
+ dm_sw_256b_d = 2,
+ dm_sw_SPARE_0 = 3,
+ dm_sw_SPARE_1 = 4,
+ dm_sw_4kb_s = 5,
+ dm_sw_4kb_d = 6,
+ dm_sw_SPARE_2 = 7,
+ dm_sw_SPARE_3 = 8,
+ dm_sw_64kb_s = 9,
+ dm_sw_64kb_d = 10,
+ dm_sw_SPARE_4 = 11,
+ dm_sw_SPARE_5 = 12,
+ dm_sw_var_s = 13,
+ dm_sw_var_d = 14,
+ dm_sw_SPARE_6 = 15,
+ dm_sw_SPARE_7 = 16,
+ dm_sw_64kb_s_t = 17,
+ dm_sw_64kb_d_t = 18,
+ dm_sw_SPARE_10 = 19,
+ dm_sw_SPARE_11 = 20,
+ dm_sw_4kb_s_x = 21,
+ dm_sw_4kb_d_x = 22,
+ dm_sw_SPARE_12 = 23,
+ dm_sw_SPARE_13 = 24,
+ dm_sw_64kb_s_x = 25,
+ dm_sw_64kb_d_x = 26,
+ dm_sw_SPARE_14 = 27,
+ dm_sw_SPARE_15 = 28,
+ dm_sw_var_s_x = 29,
+ dm_sw_var_d_x = 30,
+ dm_sw_64kb_r_x,
+ dm_sw_gfx7_2d_thin_lvp,
+ dm_sw_gfx7_2d_thin_gl
+};
+enum lb_depth {
+ dm_lb_10 = 0, dm_lb_8 = 1, dm_lb_6 = 2, dm_lb_12 = 3, dm_lb_16
+};
+enum voltage_state {
+ dm_vmin = 0, dm_vmid = 1, dm_vnom = 2, dm_vmax = 3
+};
+enum source_macro_tile_size {
+ dm_4k_tile = 0, dm_64k_tile = 1, dm_256k_tile = 2
+};
+enum cursor_bpp {
+ dm_cur_2bit = 0, dm_cur_32bit = 1, dm_cur_64bit = 2
+};
+enum clock_change_support {
+ dm_dram_clock_change_uninitialized = 0,
+ dm_dram_clock_change_vactive,
+ dm_dram_clock_change_vblank,
+ dm_dram_clock_change_unsupported
+};
+
+enum output_standard {
+ dm_std_uninitialized = 0, dm_std_cvtr2, dm_std_cvt
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
new file mode 100644
index 000000000000..4c31fa54af39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_mode_lib.h"
+#include "dc_features.h"
+
+static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
+{
+ if (project == DML_PROJECT_RAVEN1) {
+ soc->sr_exit_time_us = 9.0;
+ soc->sr_enter_plus_exit_time_us = 11.0;
+ soc->urgent_latency_us = 4.0;
+ soc->writeback_latency_us = 12.0;
+ soc->ideal_dram_bw_after_urgent_percent = 80.0;
+ soc->max_request_size_bytes = 256;
+
+ soc->vmin.dcfclk_mhz = 300.0;
+ soc->vmin.dispclk_mhz = 608.0;
+ soc->vmin.dppclk_mhz = 435.0;
+ soc->vmin.dram_bw_per_chan_gbps = 12.8;
+ soc->vmin.phyclk_mhz = 540.0;
+ soc->vmin.socclk_mhz = 208.0;
+
+ soc->vmid.dcfclk_mhz = 600.0;
+ soc->vmid.dispclk_mhz = 661.0;
+ soc->vmid.dppclk_mhz = 661.0;
+ soc->vmid.dram_bw_per_chan_gbps = 12.8;
+ soc->vmid.phyclk_mhz = 540.0;
+ soc->vmid.socclk_mhz = 208.0;
+
+ soc->vnom.dcfclk_mhz = 600.0;
+ soc->vnom.dispclk_mhz = 661.0;
+ soc->vnom.dppclk_mhz = 661.0;
+ soc->vnom.dram_bw_per_chan_gbps = 38.4;
+ soc->vnom.phyclk_mhz = 810.0;
+ soc->vnom.socclk_mhz = 208.0;
+
+ soc->vmax.dcfclk_mhz = 600.0;
+ soc->vmax.dispclk_mhz = 1086.0;
+ soc->vmax.dppclk_mhz = 661.0;
+ soc->vmax.dram_bw_per_chan_gbps = 38.4;
+ soc->vmax.phyclk_mhz = 810.0;
+ soc->vmax.socclk_mhz = 208.0;
+
+ soc->downspread_percent = 0.5;
+ soc->dram_page_open_time_ns = 50.0;
+ soc->dram_rw_turnaround_time_ns = 17.5;
+ soc->dram_return_buffer_per_channel_bytes = 8192;
+ soc->round_trip_ping_latency_dcfclk_cycles = 128;
+ soc->urgent_out_of_order_return_per_channel_bytes = 256;
+ soc->channel_interleave_bytes = 256;
+ soc->num_banks = 8;
+ soc->num_chans = 2;
+ soc->vmm_page_size_bytes = 4096;
+ soc->dram_clock_change_latency_us = 17.0;
+ soc->writeback_dram_clock_change_latency_us = 23.0;
+ soc->return_bus_width_bytes = 64;
+ } else {
+ BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+ }
+}
+
+static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
+{
+ if (project == DML_PROJECT_RAVEN1) {
+ ip->rob_buffer_size_kbytes = 64;
+ ip->det_buffer_size_kbytes = 164;
+ ip->dpte_buffer_size_in_pte_reqs = 42;
+ ip->dpp_output_buffer_pixels = 2560;
+ ip->opp_output_buffer_lines = 1;
+ ip->pixel_chunk_size_kbytes = 8;
+ ip->pte_enable = 1;
+ ip->pte_chunk_size_kbytes = 2;
+ ip->meta_chunk_size_kbytes = 2;
+ ip->writeback_chunk_size_kbytes = 2;
+ ip->line_buffer_size_bits = 589824;
+ ip->max_line_buffer_lines = 12;
+ ip->IsLineBufferBppFixed = 0;
+ ip->LineBufferFixedBpp = -1;
+ ip->writeback_luma_buffer_size_kbytes = 12;
+ ip->writeback_chroma_buffer_size_kbytes = 8;
+ ip->max_num_dpp = 4;
+ ip->max_num_wb = 2;
+ ip->max_dchub_pscl_bw_pix_per_clk = 4;
+ ip->max_pscl_lb_bw_pix_per_clk = 2;
+ ip->max_lb_vscl_bw_pix_per_clk = 4;
+ ip->max_vscl_hscl_bw_pix_per_clk = 4;
+ ip->max_hscl_ratio = 4;
+ ip->max_vscl_ratio = 4;
+ ip->hscl_mults = 4;
+ ip->vscl_mults = 4;
+ ip->max_hscl_taps = 8;
+ ip->max_vscl_taps = 8;
+ ip->dispclk_ramp_margin_percent = 1;
+ ip->underscan_factor = 1.10;
+ ip->min_vblank_lines = 14;
+ ip->dppclk_delay_subtotal = 90;
+ ip->dispclk_delay_subtotal = 42;
+ ip->dcfclk_cstate_latency = 10;
+ ip->max_inter_dcn_tile_repeaters = 8;
+ ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0;
+ ip->bug_forcing_LC_req_same_size_fixed = 0;
+ } else {
+ BREAK_TO_DEBUGGER(); /* Invalid Project Specified */
+ }
+}
+
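+/*
+ * dml_init_instance() is cheap to call repeatedly: the SoC bounding box and
+ * IP parameters are only re-fetched when the requested project differs from
+ * the one already cached in the library instance.
+ */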
+void dml_init_instance(struct display_mode_lib *lib, enum dml_project project)
+{
+ if (lib->project != project) {
+ set_soc_bounding_box(&lib->soc, project);
+ set_ip_params(&lib->ip, project);
+ lib->project = project;
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
new file mode 100644
index 000000000000..26f4f2a3d90d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DISPLAY_MODE_LIB_H__
+#define __DISPLAY_MODE_LIB_H__
+
+
+#include "dml_common_defs.h"
+#include "soc_bounding_box.h"
+#include "display_mode_vba.h"
+#include "display_rq_dlg_calc.h"
+#include "dml1_display_rq_dlg_calc.h"
+
+enum dml_project {
+ DML_PROJECT_UNDEFINED,
+ DML_PROJECT_RAVEN1
+};
+
+struct display_mode_lib {
+ struct _vcs_dpi_ip_params_st ip;
+ struct _vcs_dpi_soc_bounding_box_st soc;
+ enum dml_project project;
+ struct vba_vars_st vba;
+ struct dal_logger *logger;
+};
+
+void dml_init_instance(struct display_mode_lib *lib, enum dml_project project);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
new file mode 100644
index 000000000000..baf182177736
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -0,0 +1,557 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DISPLAY_MODE_STRUCTS_H__
+#define __DISPLAY_MODE_STRUCTS_H__
+
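+/*
+ * Short aliases for the _vcs_dpi_-prefixed structures defined below; the
+ * rest of DML refers to them through these unprefixed typedef names.
+ */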
+typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
+typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
+typedef struct _vcs_dpi_ip_params_st ip_params_st;
+typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st;
+typedef struct _vcs_dpi_display_output_params_st display_output_params_st;
+typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st;
+typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st;
+typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st;
+typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st;
+typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st;
+typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st;
+typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st;
+typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st;
+typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st;
+typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st;
+typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st;
+typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st;
+typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st;
+typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st;
+typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st;
+typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st;
+typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st;
+typedef struct _vcs_dpi_display_rq_params_st display_rq_params_st;
+typedef struct _vcs_dpi_display_dlg_regs_st display_dlg_regs_st;
+typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st;
+typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st;
+typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st;
+typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st;
+typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st;
+typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st;
+typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st;
+
+struct _vcs_dpi_voltage_scaling_st {
+ int state;
+ double dscclk_mhz;
+ double dcfclk_mhz;
+ double socclk_mhz;
+ double dram_speed_mhz;
+ double fabricclk_mhz;
+ double dispclk_mhz;
+ double dram_bw_per_chan_gbps;
+ double phyclk_mhz;
+ double dppclk_mhz;
+};
+
+struct _vcs_dpi_soc_bounding_box_st {
+ double sr_exit_time_us;
+ double sr_enter_plus_exit_time_us;
+ double urgent_latency_us;
+ double writeback_latency_us;
+ double ideal_dram_bw_after_urgent_percent;
+ unsigned int max_request_size_bytes;
+ struct _vcs_dpi_voltage_scaling_st vmin;
+ struct _vcs_dpi_voltage_scaling_st vmid;
+ struct _vcs_dpi_voltage_scaling_st vnom;
+ struct _vcs_dpi_voltage_scaling_st vmax;
+ double downspread_percent;
+ double dram_page_open_time_ns;
+ double dram_rw_turnaround_time_ns;
+ double dram_return_buffer_per_channel_bytes;
+ double dram_channel_width_bytes;
+ double fabric_datapath_to_dcn_data_return_bytes;
+ double dcn_downspread_percent;
+ double dispclk_dppclk_vco_speed_mhz;
+ double dfs_vco_period_ps;
+ unsigned int round_trip_ping_latency_dcfclk_cycles;
+ unsigned int urgent_out_of_order_return_per_channel_bytes;
+ unsigned int channel_interleave_bytes;
+ unsigned int num_banks;
+ unsigned int num_chans;
+ unsigned int vmm_page_size_bytes;
+ double dram_clock_change_latency_us;
+ double writeback_dram_clock_change_latency_us;
+ unsigned int return_bus_width_bytes;
+ unsigned int voltage_override;
+ double xfc_bus_transport_time_us;
+ double xfc_xbuf_latency_tolerance_us;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+};
+
+struct _vcs_dpi_ip_params_st {
+ unsigned int max_inter_dcn_tile_repeaters;
+ unsigned int num_dsc;
+ unsigned int odm_capable;
+ unsigned int rob_buffer_size_kbytes;
+ unsigned int det_buffer_size_kbytes;
+ unsigned int dpte_buffer_size_in_pte_reqs;
+ unsigned int pde_proc_buffer_size_64k_reqs;
+ unsigned int dpp_output_buffer_pixels;
+ unsigned int opp_output_buffer_lines;
+ unsigned int pixel_chunk_size_kbytes;
+ unsigned char pte_enable;
+ unsigned int pte_chunk_size_kbytes;
+ unsigned int meta_chunk_size_kbytes;
+ unsigned int writeback_chunk_size_kbytes;
+ unsigned int line_buffer_size_bits;
+ unsigned int max_line_buffer_lines;
+ unsigned int writeback_luma_buffer_size_kbytes;
+ unsigned int writeback_chroma_buffer_size_kbytes;
+ unsigned int writeback_chroma_line_buffer_width_pixels;
+ unsigned int max_page_table_levels;
+ unsigned int max_num_dpp;
+ unsigned int max_num_otg;
+ unsigned int cursor_chunk_size;
+ unsigned int cursor_buffer_size;
+ unsigned int max_num_wb;
+ unsigned int max_dchub_pscl_bw_pix_per_clk;
+ unsigned int max_pscl_lb_bw_pix_per_clk;
+ unsigned int max_lb_vscl_bw_pix_per_clk;
+ unsigned int max_vscl_hscl_bw_pix_per_clk;
+ double max_hscl_ratio;
+ double max_vscl_ratio;
+ unsigned int hscl_mults;
+ unsigned int vscl_mults;
+ unsigned int max_hscl_taps;
+ unsigned int max_vscl_taps;
+ unsigned int xfc_supported;
+ unsigned int ptoi_supported;
+ unsigned int xfc_fill_constant_bytes;
+ double dispclk_ramp_margin_percent;
+ double xfc_fill_bw_overhead_percent;
+ double underscan_factor;
+ unsigned int min_vblank_lines;
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int dcfclk_cstate_latency;
+ unsigned int dppclk_delay_scl;
+ unsigned int dppclk_delay_scl_lb_only;
+ unsigned int dppclk_delay_cnvc_formatter;
+ unsigned int dppclk_delay_cnvc_cursor;
+ unsigned int is_line_buffer_bpp_fixed;
+ unsigned int line_buffer_fixed_bpp;
+ unsigned int dcc_supported;
+
+ unsigned int IsLineBufferBppFixed;
+ unsigned int LineBufferFixedBpp;
+ unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ unsigned int bug_forcing_LC_req_same_size_fixed;
+};
+
+struct _vcs_dpi_display_xfc_params_st {
+ double xfc_tslv_vready_offset_us;
+ double xfc_tslv_vupdate_width_us;
+ double xfc_tslv_vupdate_offset_us;
+ int xfc_slv_chunk_size_bytes;
+};
+
+struct _vcs_dpi_display_pipe_source_params_st {
+ int source_format;
+ unsigned char dcc;
+ unsigned int dcc_override;
+ unsigned int dcc_rate;
+ unsigned char dcc_use_global;
+ unsigned char vm;
+ unsigned char vm_levels_force_en;
+ unsigned int vm_levels_force;
+ int source_scan;
+ int sw_mode;
+ int macro_tile_size;
+ unsigned char is_display_sw;
+ unsigned int viewport_width;
+ unsigned int viewport_height;
+ unsigned int viewport_y_y;
+ unsigned int viewport_y_c;
+ unsigned int viewport_width_c;
+ unsigned int viewport_height_c;
+ unsigned int data_pitch;
+ unsigned int data_pitch_c;
+ unsigned int meta_pitch;
+ unsigned int meta_pitch_c;
+ unsigned int cur0_src_width;
+ int cur0_bpp;
+ unsigned int cur1_src_width;
+ int cur1_bpp;
+ int num_cursors;
+ unsigned char is_hsplit;
+ unsigned char dynamic_metadata_enable;
+ unsigned int dynamic_metadata_lines_before_active;
+ unsigned int dynamic_metadata_xmit_bytes;
+ unsigned int hsplit_grp;
+ unsigned char xfc_enable;
+ unsigned char xfc_slave;
+ struct _vcs_dpi_display_xfc_params_st xfc_params;
+};
+struct writeback_st {
+ int wb_src_height;
+ int wb_dst_width;
+ int wb_dst_height;
+ int wb_pixel_format;
+ int wb_htaps_luma;
+ int wb_vtaps_luma;
+ int wb_htaps_chroma;
+ int wb_vtaps_chroma;
+ int wb_hratio;
+ int wb_vratio;
+};
+
+struct _vcs_dpi_display_output_params_st {
+ int dp_lanes;
+ int output_bpp;
+ int dsc_enable;
+ int wb_enable;
+ int output_bpc;
+ int output_type;
+ int output_format;
+ int output_standard;
+ int dsc_slices;
+ struct writeback_st wb;
+};
+
+struct _vcs_dpi_display_bandwidth_st {
+ double total_bw_consumed_gbps;
+ double guaranteed_urgent_return_bw_gbps;
+};
+
+struct _vcs_dpi_scaler_ratio_depth_st {
+ double hscl_ratio;
+ double vscl_ratio;
+ double hscl_ratio_c;
+ double vscl_ratio_c;
+ double vinit;
+ double vinit_c;
+ double vinit_bot;
+ double vinit_bot_c;
+ int lb_depth;
+ int scl_enable;
+};
+
+struct _vcs_dpi_scaler_taps_st {
+ unsigned int htaps;
+ unsigned int vtaps;
+ unsigned int htaps_c;
+ unsigned int vtaps_c;
+};
+
+struct _vcs_dpi_display_pipe_dest_params_st {
+ unsigned int recout_width;
+ unsigned int recout_height;
+ unsigned int full_recout_width;
+ unsigned int full_recout_height;
+ unsigned int hblank_start;
+ unsigned int hblank_end;
+ unsigned int vblank_start;
+ unsigned int vblank_end;
+ unsigned int htotal;
+ unsigned int vtotal;
+ unsigned int vactive;
+ unsigned int hactive;
+ unsigned int vstartup_start;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+ unsigned char interlaced;
+ unsigned char underscan;
+ double pixel_rate_mhz;
+ unsigned char synchronized_vblank_all_planes;
+ unsigned char otg_inst;
+ unsigned char odm_split_cnt;
+ unsigned char odm_combine;
+};
+
+struct _vcs_dpi_display_pipe_params_st {
+ display_pipe_source_params_st src;
+ display_pipe_dest_params_st dest;
+ scaler_ratio_depth_st scale_ratio_depth;
+ scaler_taps_st scale_taps;
+};
+
+struct _vcs_dpi_display_clocks_and_cfg_st {
+ int voltage;
+ double dppclk_mhz;
+ double refclk_mhz;
+ double dispclk_mhz;
+ double dcfclk_mhz;
+ double socclk_mhz;
+};
+
+struct _vcs_dpi_display_e2e_pipe_params_st {
+ display_pipe_params_st pipe;
+ display_output_params_st dout;
+ display_clocks_and_cfg_st clks_cfg;
+};
+
+struct _vcs_dpi_dchub_buffer_sizing_st {
+ unsigned int swath_width_y;
+ unsigned int swath_height_y;
+ unsigned int swath_height_c;
+ unsigned int detail_buffer_size_y;
+};
+
+struct _vcs_dpi_watermarks_perf_st {
+ double stutter_eff_in_active_region_percent;
+ double urgent_latency_supported_us;
+ double non_urgent_latency_supported_us;
+ double dram_clock_change_margin_us;
+ double dram_access_eff_percent;
+};
+
+struct _vcs_dpi_cstate_pstate_watermarks_st {
+ double cstate_exit_us;
+ double cstate_enter_plus_exit_us;
+ double pstate_change_us;
+};
+
+struct _vcs_dpi_wm_calc_pipe_params_st {
+ unsigned int num_dpp;
+ int voltage;
+ int output_type;
+ double dcfclk_mhz;
+ double socclk_mhz;
+ double dppclk_mhz;
+ double pixclk_mhz;
+ unsigned char interlace_en;
+ unsigned char pte_enable;
+ unsigned char dcc_enable;
+ double dcc_rate;
+ double bytes_per_pixel_c;
+ double bytes_per_pixel_y;
+ unsigned int swath_width_y;
+ unsigned int swath_height_y;
+ unsigned int swath_height_c;
+ unsigned int det_buffer_size_y;
+ double h_ratio;
+ double v_ratio;
+ unsigned int h_taps;
+ unsigned int h_total;
+ unsigned int v_total;
+ unsigned int v_active;
+ unsigned int e2e_index;
+ double display_pipe_line_delivery_time;
+ double read_bw;
+ unsigned int lines_in_det_y;
+ unsigned int lines_in_det_y_rounded_down_to_swath;
+ double full_det_buffering_time;
+ double dcfclk_deepsleep_mhz_per_plane;
+};
+
+struct _vcs_dpi_vratio_pre_st {
+ double vratio_pre_l;
+ double vratio_pre_c;
+};
+
+struct _vcs_dpi_display_data_rq_misc_params_st {
+ unsigned int full_swath_bytes;
+ unsigned int stored_swath_bytes;
+ unsigned int blk256_height;
+ unsigned int blk256_width;
+ unsigned int req_height;
+ unsigned int req_width;
+};
+
+struct _vcs_dpi_display_data_rq_sizing_params_st {
+ unsigned int chunk_bytes;
+ unsigned int min_chunk_bytes;
+ unsigned int meta_chunk_bytes;
+ unsigned int min_meta_chunk_bytes;
+ unsigned int mpte_group_bytes;
+ unsigned int dpte_group_bytes;
+};
+
+struct _vcs_dpi_display_data_rq_dlg_params_st {
+ unsigned int swath_width_ub;
+ unsigned int swath_height;
+ unsigned int req_per_swath_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ unsigned int dpte_req_per_row_ub;
+ unsigned int dpte_groups_per_row_ub;
+ unsigned int dpte_row_height;
+ unsigned int dpte_bytes_per_row_ub;
+ unsigned int meta_chunks_per_row_ub;
+ unsigned int meta_req_per_row_ub;
+ unsigned int meta_row_height;
+ unsigned int meta_bytes_per_row_ub;
+};
+
+struct _vcs_dpi_display_cur_rq_dlg_params_st {
+ unsigned char enable;
+ unsigned int swath_height;
+ unsigned int req_per_line;
+};
+
+struct _vcs_dpi_display_rq_dlg_params_st {
+ display_data_rq_dlg_params_st rq_l;
+ display_data_rq_dlg_params_st rq_c;
+ display_cur_rq_dlg_params_st rq_cur0;
+};
+
+struct _vcs_dpi_display_rq_sizing_params_st {
+ display_data_rq_sizing_params_st rq_l;
+ display_data_rq_sizing_params_st rq_c;
+};
+
+struct _vcs_dpi_display_rq_misc_params_st {
+ display_data_rq_misc_params_st rq_l;
+ display_data_rq_misc_params_st rq_c;
+};
+
+struct _vcs_dpi_display_rq_params_st {
+ unsigned char yuv420;
+ unsigned char yuv420_10bpc;
+ display_rq_misc_params_st misc;
+ display_rq_sizing_params_st sizing;
+ display_rq_dlg_params_st dlg;
+};
+
+struct _vcs_dpi_display_dlg_regs_st {
+ unsigned int refcyc_h_blank_end;
+ unsigned int dlg_vblank_end;
+ unsigned int min_dst_y_next_start;
+ unsigned int refcyc_per_htotal;
+ unsigned int refcyc_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ unsigned int dst_y_prefetch;
+ unsigned int dst_y_per_vm_vblank;
+ unsigned int dst_y_per_row_vblank;
+ unsigned int dst_y_per_vm_flip;
+ unsigned int dst_y_per_row_flip;
+ unsigned int ref_freq_to_pix_freq;
+ unsigned int vratio_prefetch;
+ unsigned int vratio_prefetch_c;
+ unsigned int refcyc_per_pte_group_vblank_l;
+ unsigned int refcyc_per_pte_group_vblank_c;
+ unsigned int refcyc_per_meta_chunk_vblank_l;
+ unsigned int refcyc_per_meta_chunk_vblank_c;
+ unsigned int refcyc_per_pte_group_flip_l;
+ unsigned int refcyc_per_pte_group_flip_c;
+ unsigned int refcyc_per_meta_chunk_flip_l;
+ unsigned int refcyc_per_meta_chunk_flip_c;
+ unsigned int dst_y_per_pte_row_nom_l;
+ unsigned int dst_y_per_pte_row_nom_c;
+ unsigned int refcyc_per_pte_group_nom_l;
+ unsigned int refcyc_per_pte_group_nom_c;
+ unsigned int dst_y_per_meta_row_nom_l;
+ unsigned int dst_y_per_meta_row_nom_c;
+ unsigned int refcyc_per_meta_chunk_nom_l;
+ unsigned int refcyc_per_meta_chunk_nom_c;
+ unsigned int refcyc_per_line_delivery_pre_l;
+ unsigned int refcyc_per_line_delivery_pre_c;
+ unsigned int refcyc_per_line_delivery_l;
+ unsigned int refcyc_per_line_delivery_c;
+ unsigned int chunk_hdl_adjust_cur0;
+ unsigned int chunk_hdl_adjust_cur1;
+ unsigned int vready_after_vcount0;
+ unsigned int dst_y_offset_cur0;
+ unsigned int dst_y_offset_cur1;
+ unsigned int xfc_reg_transfer_delay;
+ unsigned int xfc_reg_precharge_delay;
+ unsigned int xfc_reg_remote_surface_flip_latency;
+ unsigned int xfc_reg_prefetch_margin;
+ unsigned int dst_y_delta_drq_limit;
+};
+
+struct _vcs_dpi_display_ttu_regs_st {
+ unsigned int qos_level_low_wm;
+ unsigned int qos_level_high_wm;
+ unsigned int min_ttu_vblank;
+ unsigned int qos_level_flip;
+ unsigned int refcyc_per_req_delivery_l;
+ unsigned int refcyc_per_req_delivery_c;
+ unsigned int refcyc_per_req_delivery_cur0;
+ unsigned int refcyc_per_req_delivery_cur1;
+ unsigned int refcyc_per_req_delivery_pre_l;
+ unsigned int refcyc_per_req_delivery_pre_c;
+ unsigned int refcyc_per_req_delivery_pre_cur0;
+ unsigned int refcyc_per_req_delivery_pre_cur1;
+ unsigned int qos_level_fixed_l;
+ unsigned int qos_level_fixed_c;
+ unsigned int qos_level_fixed_cur0;
+ unsigned int qos_level_fixed_cur1;
+ unsigned int qos_ramp_disable_l;
+ unsigned int qos_ramp_disable_c;
+ unsigned int qos_ramp_disable_cur0;
+ unsigned int qos_ramp_disable_cur1;
+};
+
+struct _vcs_dpi_display_data_rq_regs_st {
+ unsigned int chunk_size;
+ unsigned int min_chunk_size;
+ unsigned int meta_chunk_size;
+ unsigned int min_meta_chunk_size;
+ unsigned int dpte_group_size;
+ unsigned int mpte_group_size;
+ unsigned int swath_height;
+ unsigned int pte_row_height_linear;
+};
+
+struct _vcs_dpi_display_rq_regs_st {
+ display_data_rq_regs_st rq_regs_l;
+ display_data_rq_regs_st rq_regs_c;
+ unsigned int drq_expansion_mode;
+ unsigned int prq_expansion_mode;
+ unsigned int mrq_expansion_mode;
+ unsigned int crq_expansion_mode;
+ unsigned int plane1_base_address;
+};
+
+struct _vcs_dpi_display_dlg_sys_params_st {
+ double t_mclk_wm_us;
+ double t_urg_wm_us;
+ double t_sr_wm_us;
+ double t_extra_us;
+ double mem_trip_us;
+ double t_srx_delay_us;
+ double deepsleep_dcfclk_mhz;
+ double total_flip_bw;
+ unsigned int total_flip_bytes;
+};
+
+struct _vcs_dpi_display_dlg_prefetch_param_st {
+ double prefetch_bw;
+ unsigned int flip_bytes;
+};
+
+struct _vcs_dpi_display_pipe_clock_st {
+ double dcfclk_mhz;
+ double dispclk_mhz;
+ double socclk_mhz;
+ double dscclk_mhz[6];
+ double dppclk_mhz[6];
+};
+
+struct _vcs_dpi_display_arb_params_st {
+ int max_req_outstanding;
+ int min_req_outstanding;
+ int sat_level_us;
+};
+
+#endif /*__DISPLAY_MODE_STRUCTS_H__*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
new file mode 100644
index 000000000000..ea661ee44674
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -0,0 +1,6124 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_mode_lib.h"
+#include "display_mode_vba.h"
+
+#include "dml_inline_defs.h"
+
+static const unsigned int NumberOfStates = DC__VOLTAGE_STATES;
+
+static void fetch_socbb_params(struct display_mode_lib *mode_lib);
+static void fetch_ip_params(struct display_mode_lib *mode_lib);
+static void fetch_pipe_params(struct display_mode_lib *mode_lib);
+static void recalculate_params(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+static void recalculate(struct display_mode_lib *mode_lib);
+static double adjust_ReturnBW(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ bool DCCEnabledAnyPlane,
+ double ReturnBandwidthToDCN);
+static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib);
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib);
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat);
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat);
+// Super monster function with some 45 arguments
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ double DCFClkDeepSleep,
+ unsigned int DSCDelay,
+ unsigned int DPPPerPlane,
+ bool ScalerEnabled,
+ unsigned int NumberOfCursors,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int VBlank,
+ unsigned int HTotal,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int PageTableLevels,
+ bool VirtualMemoryEnable,
+ bool DynamicMetadataEnable,
+ unsigned int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool InterlaceEnable,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBW,
+ unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ unsigned int *VUpdateOffsetPix,
+ unsigned int *VUpdateWidthPix,
+ unsigned int *VReadyOffsetPix);
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed);
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed);
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath);
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidthY,
+ bool VirtualMemoryEnable,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int PDEProcessingBufIn64KBReqs,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_height,
+ unsigned int *meta_row_height);
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime);
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk);
+static double CalculateWriteBackDISPCLK(
+ enum source_format_class WritebackPixelFormat,
+ double PixelClock,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ double WritebackDestinationWidth,
+ unsigned int HTotal,
+ unsigned int WritebackChromaLineBufferWidth);
+static void CalculateActiveRowBandwidth(
+ bool VirtualMemoryEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw,
+ double *qual_row_bw);
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int MaxPageTableLevels,
+ bool VirtualMemoryEnable,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int ImmediateFlipBytes,
+ double LineTime,
+ double Tno_bw,
+ double VRatio,
+ double PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ double qual_row_bw,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe);
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth);
+static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib);
+static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp);
+static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib);
+
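+/*
+ * Map the requested power-state allowances onto a DML prefetch mode:
+ * 0 when both cstate_en and pstate_en are set, 1 when only cstate_en is
+ * set, 2 otherwise.  Any change to the cached settings forces a full
+ * recalculate().
+ */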
+void set_prefetch_mode(
+ struct display_mode_lib *mode_lib,
+ bool cstate_en,
+ bool pstate_en,
+ bool ignore_viewport_pos,
+ bool immediate_flip_support)
+{
+ unsigned int prefetch_mode;
+
+ if (cstate_en && pstate_en)
+ prefetch_mode = 0;
+ else if (cstate_en)
+ prefetch_mode = 1;
+ else
+ prefetch_mode = 2;
+ if (prefetch_mode != mode_lib->vba.PrefetchMode
+ || ignore_viewport_pos != mode_lib->vba.IgnoreViewportPositioning
+ || immediate_flip_support != mode_lib->vba.ImmediateFlipSupport) {
+ DTRACE(
+ " Prefetch mode has changed from %i to %i. Recalculating.",
+ mode_lib->vba.PrefetchMode,
+ prefetch_mode);
+ mode_lib->vba.PrefetchMode = prefetch_mode;
+ mode_lib->vba.IgnoreViewportPositioning = ignore_viewport_pos;
+ mode_lib->vba.ImmediateFlipSupport = immediate_flip_support;
+ recalculate(mode_lib);
+ }
+}
+
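+/*
+ * dml_get_voltage_level() caches the SoC, IP and pipe parameters and only
+ * recalculates when any of them change; it then runs the full mode-support
+ * pass and returns the resulting vba.VoltageLevel.
+ */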
+unsigned int dml_get_voltage_level(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ bool need_recalculate = memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
+ || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
+ || num_pipes != mode_lib->vba.cache_num_pipes
+ || memcmp(pipes, mode_lib->vba.cache_pipes,
+ sizeof(display_e2e_pipe_params_st) * num_pipes) != 0;
+
+ mode_lib->vba.soc = mode_lib->soc;
+ mode_lib->vba.ip = mode_lib->ip;
+ memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
+ mode_lib->vba.cache_num_pipes = num_pipes;
+
+ if (need_recalculate && pipes[0].clks_cfg.dppclk_mhz != 0)
+ recalculate(mode_lib);
+ else {
+ fetch_socbb_params(mode_lib);
+ fetch_ip_params(mode_lib);
+ fetch_pipe_params(mode_lib);
+ }
+ ModeSupportAndSystemConfigurationFull(mode_lib);
+
+ return mode_lib->vba.VoltageLevel;
+}
+
+#define dml_get_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes) \
+{ \
+ recalculate_params(mode_lib, pipes, num_pipes); \
+ return var; \
+}
+
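+/*
+ * Each dml_get_attr_func() invocation below expands to a get_<attr>()
+ * helper that refreshes the cached state via recalculate_params() and
+ * then returns the named per-configuration VBA variable.
+ */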
+dml_get_attr_func(clk_dcf_deepsleep, mode_lib->vba.DCFClkDeepSleep);
+dml_get_attr_func(wm_urgent, mode_lib->vba.UrgentWatermark);
+dml_get_attr_func(wm_memory_trip, mode_lib->vba.MemoryTripWatermark);
+dml_get_attr_func(wm_writeback_urgent, mode_lib->vba.WritebackUrgentWatermark);
+dml_get_attr_func(wm_stutter_exit, mode_lib->vba.StutterExitWatermark);
+dml_get_attr_func(wm_stutter_enter_exit, mode_lib->vba.StutterEnterPlusExitWatermark);
+dml_get_attr_func(wm_dram_clock_change, mode_lib->vba.DRAMClockChangeWatermark);
+dml_get_attr_func(wm_writeback_dram_clock_change, mode_lib->vba.WritebackDRAMClockChangeWatermark);
+dml_get_attr_func(wm_xfc_underflow, mode_lib->vba.UrgentWatermark); // xfc_underflow maps to urgent
+dml_get_attr_func(stutter_efficiency, mode_lib->vba.StutterEfficiency);
+dml_get_attr_func(stutter_efficiency_no_vblank, mode_lib->vba.StutterEfficiencyNotIncludingVBlank);
+dml_get_attr_func(urgent_latency, mode_lib->vba.MinUrgentLatencySupportUs);
+dml_get_attr_func(urgent_extra_latency, mode_lib->vba.UrgentExtraLatency);
+dml_get_attr_func(nonurgent_latency, mode_lib->vba.NonUrgentLatencyTolerance);
+dml_get_attr_func(
+ dram_clock_change_latency,
+ mode_lib->vba.MinActiveDRAMClockChangeLatencySupported);
+dml_get_attr_func(dispclk_calculated, mode_lib->vba.DISPCLK_calculated);
+dml_get_attr_func(total_data_read_bw, mode_lib->vba.TotalDataReadBandwidth);
+dml_get_attr_func(return_bw, mode_lib->vba.ReturnBW);
+dml_get_attr_func(tcalc, mode_lib->vba.TCalc);
+
+#define dml_get_pipe_attr_func(attr, var) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe) \
+{\
+ unsigned int which_plane; \
+ recalculate_params(mode_lib, pipes, num_pipes); \
+ which_plane = mode_lib->vba.pipe_plane[which_pipe]; \
+ return var[which_plane]; \
+}
+
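+/*
+ * Per-pipe variants: dml_get_pipe_attr_func() additionally translates the
+ * caller's pipe index into a plane index through vba.pipe_plane[], since
+ * the VBA arrays are indexed by plane rather than by pipe.
+ */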
+dml_get_pipe_attr_func(dsc_delay, mode_lib->vba.DSCDelay);
+dml_get_pipe_attr_func(dppclk_calculated, mode_lib->vba.DPPCLK_calculated);
+dml_get_pipe_attr_func(dscclk_calculated, mode_lib->vba.DSCCLK_calculated);
+dml_get_pipe_attr_func(min_ttu_vblank, mode_lib->vba.MinTTUVBlank);
+dml_get_pipe_attr_func(vratio_prefetch_l, mode_lib->vba.VRatioPrefetchY);
+dml_get_pipe_attr_func(vratio_prefetch_c, mode_lib->vba.VRatioPrefetchC);
+dml_get_pipe_attr_func(dst_x_after_scaler, mode_lib->vba.DSTXAfterScaler);
+dml_get_pipe_attr_func(dst_y_after_scaler, mode_lib->vba.DSTYAfterScaler);
+dml_get_pipe_attr_func(dst_y_per_vm_vblank, mode_lib->vba.DestinationLinesToRequestVMInVBlank);
+dml_get_pipe_attr_func(dst_y_per_row_vblank, mode_lib->vba.DestinationLinesToRequestRowInVBlank);
+dml_get_pipe_attr_func(dst_y_prefetch, mode_lib->vba.DestinationLinesForPrefetch);
+dml_get_pipe_attr_func(dst_y_per_vm_flip, mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip);
+dml_get_pipe_attr_func(
+ dst_y_per_row_flip,
+ mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip);
+
+dml_get_pipe_attr_func(xfc_transfer_delay, mode_lib->vba.XFCTransferDelay);
+dml_get_pipe_attr_func(xfc_precharge_delay, mode_lib->vba.XFCPrechargeDelay);
+dml_get_pipe_attr_func(xfc_remote_surface_flip_latency, mode_lib->vba.XFCRemoteSurfaceFlipLatency);
+dml_get_pipe_attr_func(xfc_prefetch_margin, mode_lib->vba.XFCPrefetchMargin);
+
+unsigned int get_vstartup_calculated(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes,
+ unsigned int which_pipe)
+{
+ unsigned int which_plane;
+
+ recalculate_params(mode_lib, pipes, num_pipes);
+ which_plane = mode_lib->vba.pipe_plane[which_pipe];
+ return mode_lib->vba.VStartup[which_plane];
+}
+
+double get_total_immediate_flip_bytes(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ recalculate_params(mode_lib, pipes, num_pipes);
+ return mode_lib->vba.TotImmediateFlipBytes;
+}
+
+double get_total_immediate_flip_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ recalculate_params(mode_lib, pipes, num_pipes);
+ return mode_lib->vba.ImmediateFlipBW;
+}
+
+double get_total_prefetch_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ unsigned int k;
+ double total_prefetch_bw = 0.0;
+
+ recalculate_params(mode_lib, pipes, num_pipes);
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ total_prefetch_bw += mode_lib->vba.PrefetchBandwidth[k];
+ return total_prefetch_bw;
+}
+
+static void fetch_socbb_params(struct display_mode_lib *mode_lib)
+{
+ soc_bounding_box_st *soc = &mode_lib->vba.soc;
+ unsigned int i;
+
+ // SOC Bounding Box Parameters
+ mode_lib->vba.ReturnBusWidth = soc->return_bus_width_bytes;
+ mode_lib->vba.NumberOfChannels = soc->num_chans;
+ mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency =
+ soc->ideal_dram_bw_after_urgent_percent;
+ mode_lib->vba.UrgentLatency = soc->urgent_latency_us;
+ mode_lib->vba.RoundTripPingLatencyCycles = soc->round_trip_ping_latency_dcfclk_cycles;
+ mode_lib->vba.UrgentOutOfOrderReturnPerChannel =
+ soc->urgent_out_of_order_return_per_channel_bytes;
+ mode_lib->vba.WritebackLatency = soc->writeback_latency_us;
+ mode_lib->vba.SRExitTime = soc->sr_exit_time_us;
+ mode_lib->vba.SREnterPlusExitTime = soc->sr_enter_plus_exit_time_us;
+ mode_lib->vba.DRAMClockChangeLatency = soc->dram_clock_change_latency_us;
+ mode_lib->vba.Downspreading = soc->downspread_percent;
+ mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
+ mode_lib->vba.FabricDatapathToDCNDataReturn = soc->fabric_datapath_to_dcn_data_return_bytes; // new!
+ mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading = soc->dcn_downspread_percent; // new
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed = soc->dispclk_dppclk_vco_speed_mhz; // new
+ mode_lib->vba.VMMPageSize = soc->vmm_page_size_bytes;
+ // Set the voltage scaling clocks as the defaults. Most of these will
+ // be set to different values by the test
+ for (i = 0; i < DC__VOLTAGE_STATES; i++)
+ if (soc->clock_limits[i].state == mode_lib->vba.VoltageLevel)
+ break;
+
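+ // If no clock_limits entry matches VoltageLevel, the loop above leaves
+ // i == DC__VOLTAGE_STATES and the defaults below are taken from that index.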
+ mode_lib->vba.DCFCLK = soc->clock_limits[i].dcfclk_mhz;
+ mode_lib->vba.SOCCLK = soc->clock_limits[i].socclk_mhz;
+ mode_lib->vba.DRAMSpeed = soc->clock_limits[i].dram_speed_mhz;
+ mode_lib->vba.FabricClock = soc->clock_limits[i].fabricclk_mhz;
+
+ mode_lib->vba.XFCBusTransportTime = soc->xfc_bus_transport_time_us;
+ mode_lib->vba.XFCXBUFLatencyTolerance = soc->xfc_xbuf_latency_tolerance_us;
+
+ mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp = false;
+ mode_lib->vba.MaxHSCLRatio = 4;
+ mode_lib->vba.MaxVSCLRatio = 4;
+ mode_lib->vba.MaxNumWriteback = 0; /*TODO*/
+ mode_lib->vba.WritebackLumaAndChromaScalingSupported = true;
+ mode_lib->vba.Cursor64BppSupport = true;
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.DCFCLKPerState[i] = soc->clock_limits[i].dcfclk_mhz;
+ mode_lib->vba.FabricClockPerState[i] = soc->clock_limits[i].fabricclk_mhz;
+ mode_lib->vba.SOCCLKPerState[i] = soc->clock_limits[i].socclk_mhz;
+ mode_lib->vba.PHYCLKPerState[i] = soc->clock_limits[i].phyclk_mhz;
+ mode_lib->vba.MaxDppclk[i] = soc->clock_limits[i].dppclk_mhz;
+ mode_lib->vba.MaxDSCCLK[i] = soc->clock_limits[i].dscclk_mhz;
+ mode_lib->vba.DRAMSpeedPerState[i] = soc->clock_limits[i].dram_speed_mhz;
+ mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
+ }
+}
+
+static void fetch_ip_params(struct display_mode_lib *mode_lib)
+{
+ ip_params_st *ip = &mode_lib->vba.ip;
+
+ // IP Parameters
+ mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
+ mode_lib->vba.MaxNumOTG = ip->max_num_otg;
+ mode_lib->vba.CursorChunkSize = ip->cursor_chunk_size;
+ mode_lib->vba.CursorBufferSize = ip->cursor_buffer_size;
+
+ mode_lib->vba.MaxDCHUBToPSCLThroughput = ip->max_dchub_pscl_bw_pix_per_clk;
+ mode_lib->vba.MaxPSCLToLBThroughput = ip->max_pscl_lb_bw_pix_per_clk;
+ mode_lib->vba.ROBBufferSizeInKByte = ip->rob_buffer_size_kbytes;
+ mode_lib->vba.DETBufferSizeInKByte = ip->det_buffer_size_kbytes;
+ mode_lib->vba.PixelChunkSizeInKByte = ip->pixel_chunk_size_kbytes;
+ mode_lib->vba.MetaChunkSize = ip->meta_chunk_size_kbytes;
+ mode_lib->vba.PTEChunkSize = ip->pte_chunk_size_kbytes;
+ mode_lib->vba.WritebackChunkSize = ip->writeback_chunk_size_kbytes;
+ mode_lib->vba.LineBufferSize = ip->line_buffer_size_bits;
+ mode_lib->vba.MaxLineBufferLines = ip->max_line_buffer_lines;
+ mode_lib->vba.PTEBufferSizeInRequests = ip->dpte_buffer_size_in_pte_reqs;
+ mode_lib->vba.DPPOutputBufferPixels = ip->dpp_output_buffer_pixels;
+ mode_lib->vba.OPPOutputBufferLines = ip->opp_output_buffer_lines;
+ mode_lib->vba.WritebackInterfaceLumaBufferSize = ip->writeback_luma_buffer_size_kbytes;
+ mode_lib->vba.WritebackInterfaceChromaBufferSize = ip->writeback_chroma_buffer_size_kbytes;
+ mode_lib->vba.WritebackChromaLineBufferWidth =
+ ip->writeback_chroma_line_buffer_width_pixels;
+ mode_lib->vba.MaxPageTableLevels = ip->max_page_table_levels;
+ mode_lib->vba.MaxInterDCNTileRepeaters = ip->max_inter_dcn_tile_repeaters;
+ mode_lib->vba.NumberOfDSC = ip->num_dsc;
+ mode_lib->vba.ODMCapability = ip->odm_capable;
+ mode_lib->vba.DISPCLKRampingMargin = ip->dispclk_ramp_margin_percent;
+
+ mode_lib->vba.XFCSupported = ip->xfc_supported;
+ mode_lib->vba.XFCFillBWOverhead = ip->xfc_fill_bw_overhead_percent;
+ mode_lib->vba.XFCFillConstant = ip->xfc_fill_constant_bytes;
+ mode_lib->vba.DPPCLKDelaySubtotal = ip->dppclk_delay_subtotal;
+ mode_lib->vba.DPPCLKDelaySCL = ip->dppclk_delay_scl;
+ mode_lib->vba.DPPCLKDelaySCLLBOnly = ip->dppclk_delay_scl_lb_only;
+ mode_lib->vba.DPPCLKDelayCNVCFormater = ip->dppclk_delay_cnvc_formatter;
+ mode_lib->vba.DPPCLKDelayCNVCCursor = ip->dppclk_delay_cnvc_cursor;
+ mode_lib->vba.DISPCLKDelaySubtotal = ip->dispclk_delay_subtotal;
+
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP = ip->ptoi_supported;
+
+ mode_lib->vba.PDEProcessingBufIn64KBReqs = ip->pde_proc_buffer_size_64k_reqs;
+}
+
+static void fetch_pipe_params(struct display_mode_lib *mode_lib)
+{
+ display_e2e_pipe_params_st *pipes = mode_lib->vba.cache_pipes;
+ ip_params_st *ip = &mode_lib->vba.ip;
+
+ unsigned int OTGInstPlane[DC__NUM_DPP__MAX];
+ unsigned int j, k;
+ bool PlaneVisited[DC__NUM_DPP__MAX];
+ bool visited[DC__NUM_DPP__MAX];
+
+ // Convert Pipes to Planes
+ for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k)
+ visited[k] = false;
+
+ mode_lib->vba.NumberOfActivePlanes = 0;
+ for (j = 0; j < mode_lib->vba.cache_num_pipes; ++j) {
+ display_pipe_source_params_st *src = &pipes[j].pipe.src;
+ display_pipe_dest_params_st *dst = &pipes[j].pipe.dest;
+ scaler_ratio_depth_st *scl = &pipes[j].pipe.scale_ratio_depth;
+ scaler_taps_st *taps = &pipes[j].pipe.scale_taps;
+ display_output_params_st *dout = &pipes[j].dout;
+ display_clocks_and_cfg_st *clks = &pipes[j].clks_cfg;
+
+ if (visited[j])
+ continue;
+ visited[j] = true;
+
+ mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
+
+ mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
+ mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
+ (enum scan_direction_class) (src->source_scan);
+ mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_width;
+ mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_height;
+ mode_lib->vba.ViewportYStartY[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_y_y;
+ mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
+ src->viewport_y_c;
+ mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
+ mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
+ mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
+ mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
+ mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] = scl->vscl_ratio;
+ mode_lib->vba.ScalerEnabled[mode_lib->vba.NumberOfActivePlanes] = scl->scl_enable;
+ mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes] = dst->interlaced;
+ if (mode_lib->vba.Interlace[mode_lib->vba.NumberOfActivePlanes])
+ mode_lib->vba.VRatio[mode_lib->vba.NumberOfActivePlanes] *= 2.0;
+ mode_lib->vba.htaps[mode_lib->vba.NumberOfActivePlanes] = taps->htaps;
+ mode_lib->vba.vtaps[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps;
+ mode_lib->vba.HTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->htaps_c;
+ mode_lib->vba.VTAPsChroma[mode_lib->vba.NumberOfActivePlanes] = taps->vtaps_c;
+ mode_lib->vba.HTotal[mode_lib->vba.NumberOfActivePlanes] = dst->htotal;
+ mode_lib->vba.VTotal[mode_lib->vba.NumberOfActivePlanes] = dst->vtotal;
+ mode_lib->vba.DCCEnable[mode_lib->vba.NumberOfActivePlanes] =
+ src->dcc_use_global ?
+ ip->dcc_supported : src->dcc && ip->dcc_supported;
+ mode_lib->vba.DCCRate[mode_lib->vba.NumberOfActivePlanes] = src->dcc_rate;
+ mode_lib->vba.SourcePixelFormat[mode_lib->vba.NumberOfActivePlanes] =
+ (enum source_format_class) (src->source_format);
+ mode_lib->vba.HActive[mode_lib->vba.NumberOfActivePlanes] = dst->hactive;
+ mode_lib->vba.VActive[mode_lib->vba.NumberOfActivePlanes] = dst->vactive;
+ mode_lib->vba.SurfaceTiling[mode_lib->vba.NumberOfActivePlanes] =
+ (enum dm_swizzle_mode) (src->sw_mode);
+ mode_lib->vba.ScalerRecoutWidth[mode_lib->vba.NumberOfActivePlanes] =
+ dst->recout_width; // TODO: should this be full_recout_width instead, perhaps only when in hsplit mode?
+ mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] =
+ dst->odm_combine;
+ mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] =
+ (enum output_format_class) (dout->output_format);
+ mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] =
+ (enum output_encoder_class) (dout->output_type);
+ mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp;
+ mode_lib->vba.OutputLinkDPLanes[mode_lib->vba.NumberOfActivePlanes] =
+ dout->dp_lanes;
+ mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;
+ mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] =
+ dout->dsc_slices;
+ mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] =
+ dout->output_bpc == 0 ? 12 : dout->output_bpc;
+ mode_lib->vba.WritebackEnable[mode_lib->vba.NumberOfActivePlanes] = dout->wb_enable;
+ mode_lib->vba.WritebackSourceHeight[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_src_height;
+ mode_lib->vba.WritebackDestinationWidth[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_dst_width;
+ mode_lib->vba.WritebackDestinationHeight[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_dst_height;
+ mode_lib->vba.WritebackPixelFormat[mode_lib->vba.NumberOfActivePlanes] =
+ (enum source_format_class) (dout->wb.wb_pixel_format);
+ mode_lib->vba.WritebackLumaHTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_htaps_luma;
+ mode_lib->vba.WritebackLumaVTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vtaps_luma;
+ mode_lib->vba.WritebackChromaHTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_htaps_chroma;
+ mode_lib->vba.WritebackChromaVTaps[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vtaps_chroma;
+ mode_lib->vba.WritebackHRatio[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_hratio;
+ mode_lib->vba.WritebackVRatio[mode_lib->vba.NumberOfActivePlanes] =
+ dout->wb.wb_vratio;
+
+ mode_lib->vba.DynamicMetadataEnable[mode_lib->vba.NumberOfActivePlanes] =
+ src->dynamic_metadata_enable;
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[mode_lib->vba.NumberOfActivePlanes] =
+ src->dynamic_metadata_lines_before_active;
+ mode_lib->vba.DynamicMetadataTransmittedBytes[mode_lib->vba.NumberOfActivePlanes] =
+ src->dynamic_metadata_xmit_bytes;
+
+ mode_lib->vba.XFCEnabled[mode_lib->vba.NumberOfActivePlanes] = src->xfc_enable
+ && ip->xfc_supported;
+ mode_lib->vba.XFCSlvChunkSize = src->xfc_params.xfc_slv_chunk_size_bytes;
+ mode_lib->vba.XFCTSlvVupdateOffset = src->xfc_params.xfc_tslv_vupdate_offset_us;
+ mode_lib->vba.XFCTSlvVupdateWidth = src->xfc_params.xfc_tslv_vupdate_width_us;
+ mode_lib->vba.XFCTSlvVreadyOffset = src->xfc_params.xfc_tslv_vready_offset_us;
+ mode_lib->vba.PixelClock[mode_lib->vba.NumberOfActivePlanes] = dst->pixel_rate_mhz;
+ mode_lib->vba.DPPCLK[mode_lib->vba.NumberOfActivePlanes] = clks->dppclk_mhz;
+ if (ip->is_line_buffer_bpp_fixed)
+ mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] =
+ ip->line_buffer_fixed_bpp;
+ else {
+ unsigned int lb_depth;
+
+ switch (scl->lb_depth) {
+ case dm_lb_6:
+ lb_depth = 18;
+ break;
+ case dm_lb_8:
+ lb_depth = 24;
+ break;
+ case dm_lb_10:
+ lb_depth = 30;
+ break;
+ case dm_lb_12:
+ lb_depth = 36;
+ break;
+ case dm_lb_16:
+ lb_depth = 48;
+ break;
+ default:
+ lb_depth = 36;
+ }
+ mode_lib->vba.LBBitPerPixel[mode_lib->vba.NumberOfActivePlanes] = lb_depth;
+ }
+ mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes] = 0;
+ // The DML spreadsheet assumes that the two cursors utilize the same amount of bandwidth. We'll
+ // calculate things a little more accurately
+ for (k = 0; k < DC__NUM_CURSOR__MAX; ++k) {
+ switch (k) {
+ case 0:
+ mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][0] =
+ CursorBppEnumToBits(
+ (enum cursor_bpp) (src->cur0_bpp));
+ mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][0] =
+ src->cur0_src_width;
+ if (src->cur0_src_width > 0)
+ mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
+ break;
+ case 1:
+ mode_lib->vba.CursorBPP[mode_lib->vba.NumberOfActivePlanes][1] =
+ CursorBppEnumToBits(
+ (enum cursor_bpp) (src->cur1_bpp));
+ mode_lib->vba.CursorWidth[mode_lib->vba.NumberOfActivePlanes][1] =
+ src->cur1_src_width;
+ if (src->cur1_src_width > 0)
+ mode_lib->vba.NumberOfCursors[mode_lib->vba.NumberOfActivePlanes]++;
+ break;
+ default:
+ dml_print(
+ "ERROR: Number of cursors specified exceeds supported maximum\n");
+ }
+ }
+
+ OTGInstPlane[mode_lib->vba.NumberOfActivePlanes] = dst->otg_inst;
+
+ if (dst->odm_combine && !src->is_hsplit)
+ dml_print(
+ "ERROR: ODM Combine is specified but is_hsplit has not be specified for pipe %i\n",
+ j);
+
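+ // Fold horizontally split pipes in the same hsplit group into this plane:
+ // each adds a DPP, widens (or heightens) the viewport and contributes its
+ // DSC slices.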
+ if (src->is_hsplit) {
+ for (k = j + 1; k < mode_lib->vba.cache_num_pipes; ++k) {
+ display_pipe_source_params_st *src_k = &pipes[k].pipe.src;
+ display_output_params_st *dout_k = &pipes[k].dout;
+
+ if (src_k->is_hsplit && !visited[k]
+ && src->hsplit_grp == src_k->hsplit_grp) {
+ mode_lib->vba.pipe_plane[k] =
+ mode_lib->vba.NumberOfActivePlanes;
+ mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes]++;
+ if (mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes]
+ == dm_horz)
+ mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_width;
+ else
+ mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] +=
+ src_k->viewport_height;
+
+ mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] +=
+ dout_k->dsc_slices;
+ visited[k] = true;
+ }
+ }
+ }
+
+ mode_lib->vba.NumberOfActivePlanes++;
+ }
+
+ // handle overlays through mode_lib->vba.BlendingAndTiming
+ // mode_lib->vba.BlendingAndTiming tells you which instance to look at to get the timing, the so-called 'master'
+
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ PlaneVisited[j] = false;
+
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ for (k = j + 1; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!PlaneVisited[k] && OTGInstPlane[j] == OTGInstPlane[k]) {
+ // which of the two becomes the master doesn't matter, so choose the smaller index
+ mode_lib->vba.BlendingAndTiming[j] = j;
+ PlaneVisited[j] = true;
+ mode_lib->vba.BlendingAndTiming[k] = j;
+ PlaneVisited[k] = true;
+ }
+ }
+
+ if (!PlaneVisited[j]) {
+ mode_lib->vba.BlendingAndTiming[j] = j;
+ PlaneVisited[j] = true;
+ }
+ }
+
+ // TODO: should mode_lib->vba.ODMCombineEnabled imply 2 * mode_lib->vba.DPPPerPlane? Probably not, since all pipes are specified explicitly.
+ // Likewise, should the dscclk be halved automatically? Probably not, since the value is specified.
+
+ mode_lib->vba.SynchronizedVBlank = pipes[0].pipe.dest.synchronized_vblank_all_planes;
+ for (k = 1; k < mode_lib->vba.cache_num_pipes; ++k)
+ ASSERT(mode_lib->vba.SynchronizedVBlank == pipes[k].pipe.dest.synchronized_vblank_all_planes);
+
+ mode_lib->vba.VirtualMemoryEnable = false;
+ mode_lib->vba.OverridePageTableLevels = 0;
+
+ for (k = 0; k < mode_lib->vba.cache_num_pipes; ++k) {
+ mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable
+ || !!pipes[k].pipe.src.vm;
+ mode_lib->vba.OverridePageTableLevels =
+ (pipes[k].pipe.src.vm_levels_force_en
+ && mode_lib->vba.OverridePageTableLevels
+ < pipes[k].pipe.src.vm_levels_force) ?
+ pipes[k].pipe.src.vm_levels_force :
+ mode_lib->vba.OverridePageTableLevels;
+ }
+
+ if (mode_lib->vba.OverridePageTableLevels)
+ mode_lib->vba.MaxPageTableLevels = mode_lib->vba.OverridePageTableLevels;
+
+ mode_lib->vba.VirtualMemoryEnable = mode_lib->vba.VirtualMemoryEnable && !!ip->pte_enable;
+
+ mode_lib->vba.FabricAndDRAMBandwidth = dml_min(
+ mode_lib->vba.DRAMSpeed * mode_lib->vba.NumberOfChannels
+ * mode_lib->vba.DRAMChannelWidth,
+ mode_lib->vba.FabricClock * mode_lib->vba.FabricDatapathToDCNDataReturn)
+ / 1000.0;
+
+ // TODO: Must be consistent across all pipes
+ // DCCProgrammingAssumesScanDirectionUnknown = src.dcc_scan_dir_unknown;
+}
+
+static void recalculate(struct display_mode_lib *mode_lib)
+{
+ ModeSupportAndSystemConfiguration(mode_lib);
+ PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib);
+ DisplayPipeConfiguration(mode_lib);
+ DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(mode_lib);
+}
+
+// in wm mode we pull the parameters needed from the display_e2e_pipe_params_st structs
+// rather than working them out as in recalculate_ms
+static void recalculate_params(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes)
+{
+ // Comparing with memcmp is only safe because struct display_mode_lib contains no non-POD types (plain data only)
+ if (memcmp(&mode_lib->soc, &mode_lib->vba.soc, sizeof(mode_lib->vba.soc)) != 0
+ || memcmp(&mode_lib->ip, &mode_lib->vba.ip, sizeof(mode_lib->vba.ip)) != 0
+ || num_pipes != mode_lib->vba.cache_num_pipes
+ || memcmp(
+ pipes,
+ mode_lib->vba.cache_pipes,
+ sizeof(display_e2e_pipe_params_st) * num_pipes) != 0) {
+ mode_lib->vba.soc = mode_lib->soc;
+ mode_lib->vba.ip = mode_lib->ip;
+ memcpy(mode_lib->vba.cache_pipes, pipes, sizeof(*pipes) * num_pipes);
+ mode_lib->vba.cache_num_pipes = num_pipes;
+ recalculate(mode_lib);
+ }
+}
+
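+// Take VoltageLevel, DCFCLK, SOCCLK and DISPCLK from the cached pipe clock
+// config, refresh the SOC/IP/pipe parameters, and sanity-check that the total
+// number of DPPs fits the hardware.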
+static void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib)
+{
+ soc_bounding_box_st *soc = &mode_lib->vba.soc;
+ unsigned int i, k;
+ unsigned int total_pipes = 0;
+
+ mode_lib->vba.VoltageLevel = mode_lib->vba.cache_pipes[0].clks_cfg.voltage;
+ for (i = 1; i < mode_lib->vba.cache_num_pipes; ++i)
+ ASSERT(mode_lib->vba.VoltageLevel == -1 || mode_lib->vba.VoltageLevel == mode_lib->vba.cache_pipes[i].clks_cfg.voltage);
+
+ mode_lib->vba.DCFCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dcfclk_mhz;
+ mode_lib->vba.SOCCLK = mode_lib->vba.cache_pipes[0].clks_cfg.socclk_mhz;
+
+ if (mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz > 0.0)
+ mode_lib->vba.DISPCLK = mode_lib->vba.cache_pipes[0].clks_cfg.dispclk_mhz;
+ else
+ mode_lib->vba.DISPCLK = soc->clock_limits[mode_lib->vba.VoltageLevel].dispclk_mhz;
+
+ fetch_socbb_params(mode_lib);
+ fetch_ip_params(mode_lib);
+ fetch_pipe_params(mode_lib);
+
+ // Total Available Pipes Support Check
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ total_pipes += mode_lib->vba.DPPPerPlane[k];
+ ASSERT(total_pipes <= DC__NUM_DPP__MAX);
+}
+
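+// With DCC enabled, clamp the effective return bandwidth based on the ROB
+// headroom (ROBBufferSizeInKByte - PixelChunkSizeInKByte), the urgent latency
+// and the critical compression ratio computed below.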
+static double adjust_ReturnBW(
+ struct display_mode_lib *mode_lib,
+ double ReturnBW,
+ bool DCCEnabledAnyPlane,
+ double ReturnBandwidthToDCN)
+{
+ double CriticalCompression;
+
+ if (DCCEnabledAnyPlane
+ && ReturnBandwidthToDCN
+ > mode_lib->vba.DCFCLK * mode_lib->vba.ReturnBusWidth / 4.0)
+ ReturnBW =
+ dml_min(
+ ReturnBW,
+ ReturnBandwidthToDCN * 4
+ * (1.0
+ - mode_lib->vba.UrgentLatency
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024
+ / ReturnBandwidthToDCN
+ - mode_lib->vba.DCFCLK
+ * mode_lib->vba.ReturnBusWidth
+ / 4)
+ + mode_lib->vba.UrgentLatency));
+
+ CriticalCompression = 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK
+ * mode_lib->vba.UrgentLatency
+ / (ReturnBandwidthToDCN * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024);
+
+ if (DCCEnabledAnyPlane && CriticalCompression > 1.0 && CriticalCompression < 4.0)
+ ReturnBW =
+ dml_min(
+ ReturnBW,
+ 4.0 * ReturnBandwidthToDCN
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLK
+ * mode_lib->vba.UrgentLatency
+ / dml_pow(
+ (ReturnBandwidthToDCN
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024),
+ 2));
+
+ return ReturnBW;
+}
+
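+// Delay through the DSCC encoder core, in pixels, given the source bpc, the
+// target bpp, the slice configuration and the pixel format.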
+static unsigned int dscceComputeDelay(
+ unsigned int bpc,
+ double bpp,
+ unsigned int sliceWidth,
+ unsigned int numSlices,
+ enum output_format_class pixelFormat)
+{
+ // valid bpc = source bits per component in the set of {8, 10, 12}
+ // valid bpp = increments of 1/16 of a bit
+ // min = 6/7/8 in N420/N422/444, respectively
+ // max = such that compression is 1:1
+ // valid sliceWidth = number of pixels per slice line, must be less than or equal to 5184/numSlices (or 4096/numSlices in 420 mode)
+ // valid numSlices = number of slices in the horizontal direction per DSC engine in the set of {1, 2, 3, 4}
+ // valid pixelFormat = pixel/color format in the set of {N444_RGB, S422, N422, N420}
+
+ // fixed value
+ unsigned int rcModelSize = 8192;
+
+ // N422/N420 operate at 2 pixels per clock
+ unsigned int pixelsPerClock, lstall, D, initalXmitDelay, w, s, ix, wx, p, l0, a, ax, l,
+ Delay, pixels;
+
+ if (pixelFormat == dm_n422 || pixelFormat == dm_420)
+ pixelsPerClock = 2;
+ // all other modes operate at 1 pixel per clock
+ else
+ pixelsPerClock = 1;
+
+ // initial transmit delay as per PPS
+ initalXmitDelay = dml_round(rcModelSize / 2.0 / bpp / pixelsPerClock);
+
+ // compute ssm delay
+ if (bpc == 8)
+ D = 81;
+ else if (bpc == 10)
+ D = 89;
+ else
+ D = 113;
+
+ // divide by pixel per cycle to compute slice width as seen by DSC
+ w = sliceWidth / pixelsPerClock;
+
+ // 422 mode has an additional cycle of delay
+ if (pixelFormat == dm_s422)
+ s = 1;
+ else
+ s = 0;
+
+ // main calculation for the dscce
+ ix = initalXmitDelay + 45;
+ wx = (w + 2) / 3;
+ p = 3 * wx - w;
+ l0 = ix / w;
+ a = ix + p * l0;
+ ax = (a + 2) / 3 + D + 6 + 1;
+ l = (ax + wx - 1) / wx;
+ if ((ix % w) == 0 && p != 0)
+ lstall = 1;
+ else
+ lstall = 0;
+ Delay = l * wx * (numSlices - 1) + ax + s + lstall + 22;
+
+ // dsc processes 3 pixel containers per cycle and a container can contain 1 or 2 pixels
+ pixels = Delay * 3 * pixelsPerClock;
+ return pixels;
+}
+
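+// Fixed pipeline delay through the DSC wrapper (sfr/dsccif/dscc/sft stages),
+// in pixels, for the given pixel format.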
+static unsigned int dscComputeDelay(enum output_format_class pixelFormat)
+{
+ unsigned int Delay = 0;
+
+ if (pixelFormat == dm_420) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 2;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 13;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc gets pixels every other cycle
+ Delay = Delay + 3;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else if (pixelFormat == dm_n422) {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 1;
+ // dscc - input deserializer
+ Delay = Delay + 5;
+ // dscc - input cdc fifo
+ Delay = Delay + 25;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 10;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // sft
+ Delay = Delay + 1;
+ } else {
+ // sfr
+ Delay = Delay + 2;
+ // dsccif
+ Delay = Delay + 0;
+ // dscc - input deserializer
+ Delay = Delay + 3;
+ // dscc - input cdc fifo
+ Delay = Delay + 12;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // dscc - output cdc fifo
+ Delay = Delay + 7;
+ // dscc - output serializer
+ Delay = Delay + 1;
+ // dscc - cdc uncertainty
+ Delay = Delay + 2;
+ // sft
+ Delay = Delay + 1;
+ }
+
+ return Delay;
+}
+
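+// Work out the prefetch schedule for one plane: how many destination lines
+// are available for prefetch, the VM/meta-row fetch times, the prefetch
+// VRatios and the required prefetch pixel bandwidth. Returns true on error,
+// i.e. when the schedule does not fit.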
+static bool CalculatePrefetchSchedule(
+ struct display_mode_lib *mode_lib,
+ double DPPCLK,
+ double DISPCLK,
+ double PixelClock,
+ double DCFClkDeepSleep,
+ unsigned int DSCDelay,
+ unsigned int DPPPerPlane,
+ bool ScalerEnabled,
+ unsigned int NumberOfCursors,
+ double DPPCLKDelaySubtotal,
+ double DPPCLKDelaySCL,
+ double DPPCLKDelaySCLLBOnly,
+ double DPPCLKDelayCNVCFormater,
+ double DPPCLKDelayCNVCCursor,
+ double DISPCLKDelaySubtotal,
+ unsigned int ScalerRecoutWidth,
+ enum output_format_class OutputFormat,
+ unsigned int VBlank,
+ unsigned int HTotal,
+ unsigned int MaxInterDCNTileRepeaters,
+ unsigned int VStartup,
+ unsigned int PageTableLevels,
+ bool VirtualMemoryEnable,
+ bool DynamicMetadataEnable,
+ unsigned int DynamicMetadataLinesBeforeActiveRequired,
+ unsigned int DynamicMetadataTransmittedBytes,
+ bool DCCEnable,
+ double UrgentLatency,
+ double UrgentExtraLatency,
+ double TCalc,
+ unsigned int PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ double PrefetchSourceLinesY,
+ unsigned int SwathWidthY,
+ double BytePerPixelDETY,
+ double VInitPreFillY,
+ unsigned int MaxNumSwathY,
+ double PrefetchSourceLinesC,
+ double BytePerPixelDETC,
+ double VInitPreFillC,
+ unsigned int MaxNumSwathC,
+ unsigned int SwathHeightY,
+ unsigned int SwathHeightC,
+ double TWait,
+ bool XFCEnabled,
+ double XFCRemoteSurfaceFlipDelay,
+ bool InterlaceEnable,
+ bool ProgressiveToInterlaceUnitInOPP,
+ double *DSTXAfterScaler,
+ double *DSTYAfterScaler,
+ double *DestinationLinesForPrefetch,
+ double *PrefetchBandwidth,
+ double *DestinationLinesToRequestVMInVBlank,
+ double *DestinationLinesToRequestRowInVBlank,
+ double *VRatioPrefetchY,
+ double *VRatioPrefetchC,
+ double *RequiredPrefetchPixDataBW,
+ unsigned int *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ double *Tno_bw,
+ unsigned int *VUpdateOffsetPix,
+ unsigned int *VUpdateWidthPix,
+ unsigned int *VReadyOffsetPix)
+{
+ bool MyError = false;
+ unsigned int DPPCycles, DISPCLKCycles;
+ double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime;
+ double Tdm, LineTime, Tsetup;
+ double dst_y_prefetch_equ;
+ double Tsw_oto;
+ double prefetch_bw_oto;
+ double Tvm_oto;
+ double Tr0_oto;
+ double Tpre_oto;
+ double dst_y_prefetch_oto;
+ double TimeForFetchingMetaPTE = 0;
+ double TimeForFetchingRowInVBlank = 0;
+ double LinesToRequestPrefetchPixelData = 0;
+
+ if (ScalerEnabled)
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCL;
+ else
+ DPPCycles = DPPCLKDelaySubtotal + DPPCLKDelaySCLLBOnly;
+
+ DPPCycles = DPPCycles + DPPCLKDelayCNVCFormater + NumberOfCursors * DPPCLKDelayCNVCCursor;
+
+ DISPCLKCycles = DISPCLKDelaySubtotal;
+
+ if (DPPCLK == 0.0 || DISPCLK == 0.0)
+ return true;
+
+ *DSTXAfterScaler = DPPCycles * PixelClock / DPPCLK + DISPCLKCycles * PixelClock / DISPCLK
+ + DSCDelay;
+
+ if (DPPPerPlane > 1)
+ *DSTXAfterScaler = *DSTXAfterScaler + ScalerRecoutWidth;
+
+ if (OutputFormat == dm_420 || (InterlaceEnable && ProgressiveToInterlaceUnitInOPP))
+ *DSTYAfterScaler = 1;
+ else
+ *DSTYAfterScaler = 0;
+
+ DSTTotalPixelsAfterScaler = ((double) (*DSTYAfterScaler * HTotal)) + *DSTXAfterScaler;
+ *DSTYAfterScaler = dml_floor(DSTTotalPixelsAfterScaler / HTotal, 1);
+ *DSTXAfterScaler = DSTTotalPixelsAfterScaler - ((double) (*DSTYAfterScaler * HTotal));
+
+ *VUpdateOffsetPix = dml_ceil(HTotal / 4.0, 1);
+ TotalRepeaterDelayTime = MaxInterDCNTileRepeaters * (2.0 / DPPCLK + 3.0 / DISPCLK);
+ *VUpdateWidthPix = (14.0 / DCFClkDeepSleep + 12.0 / DPPCLK + TotalRepeaterDelayTime)
+ * PixelClock;
+
+ *VReadyOffsetPix = dml_max(
+ 150.0 / DPPCLK,
+ TotalRepeaterDelayTime + 20.0 / DCFClkDeepSleep + 10.0 / DPPCLK)
+ * PixelClock;
+
+ Tsetup = (double) (*VUpdateOffsetPix + *VUpdateWidthPix + *VReadyOffsetPix) / PixelClock;
+
+ LineTime = (double) HTotal / PixelClock;
+
+ if (DynamicMetadataEnable) {
+ double Tdmbf, Tdmec, Tdmsks;
+
+ Tdm = dml_max(0.0, UrgentExtraLatency - TCalc);
+ Tdmbf = DynamicMetadataTransmittedBytes / 4.0 / DISPCLK;
+ Tdmec = LineTime;
+ if (DynamicMetadataLinesBeforeActiveRequired == 0)
+ Tdmsks = VBlank * LineTime / 2.0;
+ else
+ Tdmsks = DynamicMetadataLinesBeforeActiveRequired * LineTime;
+ if (InterlaceEnable && !ProgressiveToInterlaceUnitInOPP)
+ Tdmsks = Tdmsks / 2;
+ if (VStartup * LineTime
+ < Tsetup + TWait + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) {
+ MyError = true;
+ *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = (Tsetup + TWait
+ + UrgentExtraLatency + Tdmbf + Tdmec + Tdmsks) / LineTime;
+ } else
+ *VStartupRequiredWhenNotEnoughTimeForDynamicMetadata = 0.0;
+ } else
+ Tdm = 0;
+
+ if (VirtualMemoryEnable) {
+ if (PageTableLevels == 4)
+ *Tno_bw = UrgentExtraLatency + UrgentLatency;
+ else if (PageTableLevels == 3)
+ *Tno_bw = UrgentExtraLatency;
+ else
+ *Tno_bw = 0;
+ } else if (DCCEnable)
+ *Tno_bw = LineTime;
+ else
+ *Tno_bw = LineTime / 4;
+
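+ // Lines available for prefetch: VStartup minus the calc/wait (or XFC remote
+ // flip) time, the setup + dynamic-metadata time and the post-scaler start
+ // position, all expressed in line times.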
+ dst_y_prefetch_equ = VStartup - dml_max(TCalc + TWait, XFCRemoteSurfaceFlipDelay) / LineTime
+ - (Tsetup + Tdm) / LineTime
+ - (*DSTYAfterScaler + *DSTXAfterScaler / HTotal);
+
+ Tsw_oto = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime;
+
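+ // Bandwidth needed to move the meta row, the PTE row and all Y/C prefetch
+ // source lines within Tsw_oto ("oto" presumably meaning one line time per
+ // prefetch source line, as set just above).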
+ prefetch_bw_oto = (MetaRowByte + PixelPTEBytesPerRow
+ + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ + PrefetchSourceLinesC * SwathWidthY / 2 * dml_ceil(BytePerPixelDETC, 2))
+ / Tsw_oto;
+
+ if (VirtualMemoryEnable == true) {
+ Tvm_oto =
+ dml_max(
+ *Tno_bw + PDEAndMetaPTEBytesFrame / prefetch_bw_oto,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatency
+ * (PageTableLevels
+ - 1),
+ LineTime / 4.0));
+ } else
+ Tvm_oto = LineTime / 4.0;
+
+ if ((VirtualMemoryEnable == true || DCCEnable == true)) {
+ Tr0_oto = dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow) / prefetch_bw_oto,
+ dml_max(UrgentLatency, dml_max(LineTime - Tvm_oto, LineTime / 4)));
+ } else
+ Tr0_oto = LineTime - Tvm_oto;
+
+ Tpre_oto = Tvm_oto + Tr0_oto + Tsw_oto;
+
+ dst_y_prefetch_oto = Tpre_oto / LineTime;
+
+ if (dst_y_prefetch_oto < dst_y_prefetch_equ)
+ *DestinationLinesForPrefetch = dst_y_prefetch_oto;
+ else
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+
+ *DestinationLinesForPrefetch = dml_floor(4.0 * (*DestinationLinesForPrefetch + 0.125), 1)
+ / 4;
+
+ dml_print("DML: VStartup: %d\n", VStartup);
+ dml_print("DML: TCalc: %f\n", TCalc);
+ dml_print("DML: TWait: %f\n", TWait);
+ dml_print("DML: XFCRemoteSurfaceFlipDelay: %f\n", XFCRemoteSurfaceFlipDelay);
+ dml_print("DML: LineTime: %f\n", LineTime);
+ dml_print("DML: Tsetup: %f\n", Tsetup);
+ dml_print("DML: Tdm: %f\n", Tdm);
+ dml_print("DML: DSTYAfterScaler: %f\n", *DSTYAfterScaler);
+ dml_print("DML: DSTXAfterScaler: %f\n", *DSTXAfterScaler);
+ dml_print("DML: HTotal: %d\n", HTotal);
+
+ *PrefetchBandwidth = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ if (*DestinationLinesForPrefetch > 1) {
+ *PrefetchBandwidth = (PDEAndMetaPTEBytesFrame + 2 * MetaRowByte
+ + 2 * PixelPTEBytesPerRow
+ + PrefetchSourceLinesY * SwathWidthY * dml_ceil(BytePerPixelDETY, 1)
+ + PrefetchSourceLinesC * SwathWidthY / 2
+ * dml_ceil(BytePerPixelDETC, 2))
+ / (*DestinationLinesForPrefetch * LineTime - *Tno_bw);
+ if (VirtualMemoryEnable) {
+ TimeForFetchingMetaPTE =
+ dml_max(
+ *Tno_bw
+ + (double) PDEAndMetaPTEBytesFrame
+ / *PrefetchBandwidth,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatency
+ * (PageTableLevels
+ - 1),
+ LineTime / 4));
+ } else {
+ if (NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingMetaPTE = LineTime / 4;
+ else
+ TimeForFetchingMetaPTE = 0.0;
+ }
+
+ if ((VirtualMemoryEnable == true || DCCEnable == true)) {
+ TimeForFetchingRowInVBlank =
+ dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / *PrefetchBandwidth,
+ dml_max(
+ UrgentLatency,
+ dml_max(
+ LineTime
+ - TimeForFetchingMetaPTE,
+ LineTime
+ / 4.0)));
+ } else {
+ if (NumberOfCursors > 0 || XFCEnabled)
+ TimeForFetchingRowInVBlank = LineTime - TimeForFetchingMetaPTE;
+ else
+ TimeForFetchingRowInVBlank = 0.0;
+ }
+
+ *DestinationLinesToRequestVMInVBlank = dml_floor(
+ 4.0 * (TimeForFetchingMetaPTE / LineTime + 0.125),
+ 1) / 4.0;
+
+ *DestinationLinesToRequestRowInVBlank = dml_floor(
+ 4.0 * (TimeForFetchingRowInVBlank / LineTime + 0.125),
+ 1) / 4.0;
+
+ LinesToRequestPrefetchPixelData =
+ *DestinationLinesForPrefetch
+ - ((NumberOfCursors > 0 || VirtualMemoryEnable
+ || DCCEnable) ?
+ (*DestinationLinesToRequestVMInVBlank
+ + *DestinationLinesToRequestRowInVBlank) :
+ 0.0);
+
+ if (LinesToRequestPrefetchPixelData > 0) {
+
+ *VRatioPrefetchY = (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ if ((SwathHeightY > 4) && (VInitPreFillY > 3)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillY - 3.0) / 2.0) {
+ *VRatioPrefetchY =
+ dml_max(
+ (double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData,
+ (double) MaxNumSwathY
+ * SwathHeightY
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillY
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchY = dml_max(*VRatioPrefetchY, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ }
+ }
+
+ *VRatioPrefetchC = (double) PrefetchSourceLinesC
+ / LinesToRequestPrefetchPixelData;
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+
+ if ((SwathHeightC > 4)) {
+ if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) {
+ *VRatioPrefetchC =
+ dml_max(
+ *VRatioPrefetchC,
+ (double) MaxNumSwathC
+ * SwathHeightC
+ / (LinesToRequestPrefetchPixelData
+ - (VInitPreFillC
+ - 3.0)
+ / 2.0));
+ *VRatioPrefetchC = dml_max(*VRatioPrefetchC, 1.0);
+ } else {
+ MyError = true;
+ *VRatioPrefetchC = 0;
+ }
+ }
+
+ *RequiredPrefetchPixDataBW =
+ DPPPerPlane
+ * ((double) PrefetchSourceLinesY
+ / LinesToRequestPrefetchPixelData
+ * dml_ceil(
+ BytePerPixelDETY,
+ 1)
+ + (double) PrefetchSourceLinesC
+ / LinesToRequestPrefetchPixelData
+ * dml_ceil(
+ BytePerPixelDETC,
+ 2)
+ / 2)
+ * SwathWidthY / LineTime;
+ } else {
+ MyError = true;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ }
+
+ } else {
+ MyError = true;
+ }
+
+ if (MyError) {
+ *PrefetchBandwidth = 0;
+ TimeForFetchingMetaPTE = 0;
+ TimeForFetchingRowInVBlank = 0;
+ *DestinationLinesToRequestVMInVBlank = 0;
+ *DestinationLinesToRequestRowInVBlank = 0;
+ *DestinationLinesForPrefetch = 0;
+ LinesToRequestPrefetchPixelData = 0;
+ *VRatioPrefetchY = 0;
+ *VRatioPrefetchC = 0;
+ *RequiredPrefetchPixDataBW = 0;
+ }
+
+ return MyError;
+}
+
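+// The DFS is modelled as producing clocks of the form VCOSpeed * 4 / N for an
+// integer divider N: rounding N down yields the nearest achievable frequency
+// at or above the request, rounding N up yields the one at or below it.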
+static double RoundToDFSGranularityUp(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_floor(VCOSpeed * 4 / Clock, 1);
+}
+
+static double RoundToDFSGranularityDown(double Clock, double VCOSpeed)
+{
+ return VCOSpeed * 4 / dml_ceil(VCOSpeed * 4 / Clock, 1);
+}
+
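+// Number of source lines that must be prefetched for one plane component:
+// MaxNumSwath full swaths plus a partial swath, derived from the initial
+// prefill (VInitPreFill) and the swath height; VInitPreFill and MaxNumSwath
+// are also returned through the out parameters.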
+static double CalculatePrefetchSourceLines(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double vtaps,
+ bool Interlace,
+ bool ProgressiveToInterlaceUnitInOPP,
+ unsigned int SwathHeight,
+ unsigned int ViewportYStart,
+ double *VInitPreFill,
+ unsigned int *MaxNumSwath)
+{
+ unsigned int MaxPartialSwath;
+
+ if (ProgressiveToInterlaceUnitInOPP)
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1) / 2.0, 1);
+ else
+ *VInitPreFill = dml_floor((VRatio + vtaps + 1 + Interlace * 0.5 * VRatio) / 2.0, 1);
+
+ if (!mode_lib->vba.IgnoreViewportPositioning) {
+
+ *MaxNumSwath = dml_ceil((*VInitPreFill - 1.0) / SwathHeight, 1) + 1.0;
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 2) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 2)
+ % SwathHeight;
+ MaxPartialSwath = dml_max(1U, MaxPartialSwath);
+
+ } else {
+
+ if (ViewportYStart != 0)
+ dml_print(
+ "WARNING DML: using viewport y position of 0 even though actual viewport y position is non-zero in prefetch source lines calculation\n");
+
+ *MaxNumSwath = dml_ceil(*VInitPreFill / SwathHeight, 1);
+
+ if (*VInitPreFill > 1.0)
+ MaxPartialSwath = (unsigned int) (*VInitPreFill - 1) % SwathHeight;
+ else
+ MaxPartialSwath = (unsigned int) (*VInitPreFill + SwathHeight - 1)
+ % SwathHeight;
+ }
+
+ return *MaxNumSwath * SwathHeight + MaxPartialSwath;
+}
+
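+// Size the VM and DCC metadata requests for one plane: returns the PDE plus
+// meta-PTE bytes per frame and fills in the meta row bytes, the PTE bytes per
+// row, the dpte/meta row heights and whether the PTE request buffer would be
+// exceeded.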
+static unsigned int CalculateVMAndRowBytes(
+ struct display_mode_lib *mode_lib,
+ bool DCCEnable,
+ unsigned int BlockHeight256Bytes,
+ unsigned int BlockWidth256Bytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int SurfaceTiling,
+ unsigned int BytePerPixel,
+ enum scan_direction_class ScanDirection,
+ unsigned int ViewportWidth,
+ unsigned int ViewportHeight,
+ unsigned int SwathWidth,
+ bool VirtualMemoryEnable,
+ unsigned int VMMPageSize,
+ unsigned int PTEBufferSizeInRequests,
+ unsigned int PDEProcessingBufIn64KBReqs,
+ unsigned int Pitch,
+ unsigned int DCCMetaPitch,
+ unsigned int *MacroTileWidth,
+ unsigned int *MetaRowByte,
+ unsigned int *PixelPTEBytesPerRow,
+ bool *PTEBufferSizeNotExceeded,
+ unsigned int *dpte_row_height,
+ unsigned int *meta_row_height)
+{
+ unsigned int MetaRequestHeight;
+ unsigned int MetaRequestWidth;
+ unsigned int MetaSurfWidth;
+ unsigned int MetaSurfHeight;
+ unsigned int MPDEBytesFrame;
+ unsigned int MetaPTEBytesFrame;
+ unsigned int DCCMetaSurfaceBytes;
+
+ unsigned int MacroTileSizeBytes;
+ unsigned int MacroTileHeight;
+ unsigned int DPDE0BytesFrame;
+ unsigned int ExtraDPDEBytesFrame;
+ unsigned int PDEAndMetaPTEBytesFrame;
+
+ if (DCCEnable == true) {
+ MetaRequestHeight = 8 * BlockHeight256Bytes;
+ MetaRequestWidth = 8 * BlockWidth256Bytes;
+ if (ScanDirection == dm_horz) {
+ *meta_row_height = MetaRequestHeight;
+ MetaSurfWidth = dml_ceil((double) SwathWidth - 1, MetaRequestWidth)
+ + MetaRequestWidth;
+ *MetaRowByte = MetaSurfWidth * MetaRequestHeight * BytePerPixel / 256.0;
+ } else {
+ *meta_row_height = MetaRequestWidth;
+ MetaSurfHeight = dml_ceil((double) SwathWidth - 1, MetaRequestHeight)
+ + MetaRequestHeight;
+ *MetaRowByte = MetaSurfHeight * MetaRequestWidth * BytePerPixel / 256.0;
+ }
+ if (ScanDirection == dm_horz) {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(ViewportHeight - 1, 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ } else {
+ DCCMetaSurfaceBytes = DCCMetaPitch
+ * (dml_ceil(
+ (double) ViewportHeight - 1,
+ 64 * BlockHeight256Bytes)
+ + 64 * BlockHeight256Bytes) * BytePerPixel
+ / 256;
+ }
+ if (VirtualMemoryEnable == true) {
+ MetaPTEBytesFrame = (dml_ceil(
+ (double) (DCCMetaSurfaceBytes - VMMPageSize)
+ / (8 * VMMPageSize),
+ 1) + 1) * 64;
+ MPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 1);
+ } else {
+ MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ }
+ } else {
+ MetaPTEBytesFrame = 0;
+ MPDEBytesFrame = 0;
+ *MetaRowByte = 0;
+ }
+
+ if (SurfaceTiling == dm_sw_linear) {
+ MacroTileSizeBytes = 256;
+ MacroTileHeight = 1;
+ } else if (SurfaceTiling == dm_sw_4kb_s || SurfaceTiling == dm_sw_4kb_s_x
+ || SurfaceTiling == dm_sw_4kb_d || SurfaceTiling == dm_sw_4kb_d_x) {
+ MacroTileSizeBytes = 4096;
+ MacroTileHeight = 4 * BlockHeight256Bytes;
+ } else if (SurfaceTiling == dm_sw_64kb_s || SurfaceTiling == dm_sw_64kb_s_t
+ || SurfaceTiling == dm_sw_64kb_s_x || SurfaceTiling == dm_sw_64kb_d
+ || SurfaceTiling == dm_sw_64kb_d_t || SurfaceTiling == dm_sw_64kb_d_x
+ || SurfaceTiling == dm_sw_64kb_r_x) {
+ MacroTileSizeBytes = 65536;
+ MacroTileHeight = 16 * BlockHeight256Bytes;
+ } else {
+ MacroTileSizeBytes = 262144;
+ MacroTileHeight = 32 * BlockHeight256Bytes;
+ }
+ *MacroTileWidth = MacroTileSizeBytes / BytePerPixel / MacroTileHeight;
+
+ if (VirtualMemoryEnable == true && mode_lib->vba.MaxPageTableLevels > 1) {
+ if (ScanDirection == dm_horz) {
+ DPDE0BytesFrame =
+ 64
+ * (dml_ceil(
+ ((Pitch
+ * (dml_ceil(
+ ViewportHeight
+ - 1,
+ MacroTileHeight)
+ + MacroTileHeight)
+ * BytePerPixel)
+ - MacroTileSizeBytes)
+ / (8
+ * 2097152),
+ 1) + 1);
+ } else {
+ DPDE0BytesFrame =
+ 64
+ * (dml_ceil(
+ ((Pitch
+ * (dml_ceil(
+ (double) SwathWidth
+ - 1,
+ MacroTileHeight)
+ + MacroTileHeight)
+ * BytePerPixel)
+ - MacroTileSizeBytes)
+ / (8
+ * 2097152),
+ 1) + 1);
+ }
+ ExtraDPDEBytesFrame = 128 * (mode_lib->vba.MaxPageTableLevels - 2);
+ } else {
+ DPDE0BytesFrame = 0;
+ ExtraDPDEBytesFrame = 0;
+ }
+
+ PDEAndMetaPTEBytesFrame = MetaPTEBytesFrame + MPDEBytesFrame + DPDE0BytesFrame
+ + ExtraDPDEBytesFrame;
+
+ if (VirtualMemoryEnable == true) {
+ unsigned int PTERequestSize;
+ unsigned int PixelPTEReqHeight;
+ unsigned int PixelPTEReqWidth;
+ double FractionOfPTEReturnDrop;
+ unsigned int EffectivePDEProcessingBufIn64KBReqs;
+
+ if (SurfaceTiling == dm_sw_linear) {
+ PixelPTEReqHeight = 1;
+ PixelPTEReqWidth = 8.0 * VMMPageSize / BytePerPixel;
+ PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ } else if (MacroTileSizeBytes == 4096) {
+ PixelPTEReqHeight = MacroTileHeight;
+ PixelPTEReqWidth = 8 * *MacroTileWidth;
+ PTERequestSize = 64;
+ if (ScanDirection == dm_horz)
+ FractionOfPTEReturnDrop = 0;
+ else
+ FractionOfPTEReturnDrop = 7.0 / 8;
+ } else if (VMMPageSize == 4096 && MacroTileSizeBytes > 4096) {
+ PixelPTEReqHeight = 16 * BlockHeight256Bytes;
+ PixelPTEReqWidth = 16 * BlockWidth256Bytes;
+ PTERequestSize = 128;
+ FractionOfPTEReturnDrop = 0;
+ } else {
+ PixelPTEReqHeight = MacroTileHeight;
+ PixelPTEReqWidth = 8 * *MacroTileWidth;
+ PTERequestSize = 64;
+ FractionOfPTEReturnDrop = 0;
+ }
+
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)
+ EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs / 2;
+ else
+ EffectivePDEProcessingBufIn64KBReqs = PDEProcessingBufIn64KBReqs;
+
+ if (SurfaceTiling == dm_sw_linear) {
+ *dpte_row_height =
+ dml_min(
+ 128,
+ 1
+ << (unsigned int) dml_floor(
+ dml_log2(
+ dml_min(
+ (double) PTEBufferSizeInRequests
+ * PixelPTEReqWidth,
+ EffectivePDEProcessingBufIn64KBReqs
+ * 65536.0
+ / BytePerPixel)
+ / Pitch),
+ 1));
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(
+ (double) (Pitch * *dpte_row_height - 1)
+ / PixelPTEReqWidth,
+ 1) + 1);
+ } else if (ScanDirection == dm_horz) {
+ *dpte_row_height = PixelPTEReqHeight;
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(((double) SwathWidth - 1) / PixelPTEReqWidth, 1)
+ + 1);
+ } else {
+ *dpte_row_height = dml_min(PixelPTEReqWidth, *MacroTileWidth);
+ *PixelPTEBytesPerRow = PTERequestSize
+ * (dml_ceil(
+ ((double) SwathWidth - 1)
+ / PixelPTEReqHeight,
+ 1) + 1);
+ }
+ if (*PixelPTEBytesPerRow * (1 - FractionOfPTEReturnDrop)
+ <= 64 * PTEBufferSizeInRequests) {
+ *PTEBufferSizeNotExceeded = true;
+ } else {
+ *PTEBufferSizeNotExceeded = false;
+ }
+ } else {
+ *PixelPTEBytesPerRow = 0;
+ *PTEBufferSizeNotExceeded = true;
+ }
+
+ return PDEAndMetaPTEBytesFrame;
+}
+
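+// Derive DISPCLK/DPPCLK, the deep-sleep DCFCLK, the per-plane prefetch
+// schedules and the urgent / p-state / writeback / stutter watermarks from
+// the parameters fetched above.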
+static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation(
+ struct display_mode_lib *mode_lib)
+{
+ unsigned int j, k;
+
+ mode_lib->vba.WritebackDISPCLK = 0.0;
+ mode_lib->vba.DISPCLKWithRamping = 0;
+ mode_lib->vba.DISPCLKWithoutRamping = 0;
+ mode_lib->vba.GlobalDPPCLK = 0.0;
+
+ // mode_lib->vba.DISPCLK and mode_lib->vba.DPPCLK calculation
+ //
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k]) {
+ mode_lib->vba.WritebackDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1));
+ } else {
+ mode_lib->vba.PSCL_THROUGHPUT_LUMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.htaps[k] > 6 || mode_lib->vba.vtaps[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPLuma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma = 2 * mode_lib->vba.PixelClock[k];
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = 0.0;
+ mode_lib->vba.DPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma;
+ } else {
+ if (mode_lib->vba.HRatio[k] > 1) {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma =
+ mode_lib->vba.PixelClock[k]
+ * dml_max(
+ mode_lib->vba.VTAPsChroma[k]
+ / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]
+ / 2),
+ dml_max(
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / 4
+ / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k],
+ 1.0));
+
+ if ((mode_lib->vba.HTAPsChroma[k] > 6 || mode_lib->vba.VTAPsChroma[k] > 6)
+ && mode_lib->vba.DPPCLKUsingSingleDPPChroma
+ < 2 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma = 2
+ * mode_lib->vba.PixelClock[k];
+ }
+
+ mode_lib->vba.DPPCLKUsingSingleDPP[k] = dml_max(
+ mode_lib->vba.DPPCLKUsingSingleDPPLuma,
+ mode_lib->vba.DPPCLKUsingSingleDPPChroma);
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k)
+ continue;
+ if (mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k] / 2
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ } else if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DISPCLKWithRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100)
+ * (1
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100));
+ mode_lib->vba.DISPCLKWithoutRamping =
+ dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.PixelClock[k]
+ * (1
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100));
+ }
+ }
+
+ mode_lib->vba.DISPCLKWithRamping = dml_max(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.WritebackDISPCLK);
+ mode_lib->vba.DISPCLKWithoutRamping = dml_max(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.WritebackDISPCLK);
+
+ ASSERT(mode_lib->vba.DISPCLKDPPCLKVCOSpeed != 0);
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity = RoundToDFSGranularityUp(
+ mode_lib->vba.DISPCLKWithoutRamping,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDispclkRoundedToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.soc.clock_limits[NumberOfStates - 1].dispclk_mhz,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ if (mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithoutRampingRoundedToDFSGranularity;
+ } else if (mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity
+ > mode_lib->vba.MaxDispclkRoundedToDFSGranularity) {
+ mode_lib->vba.DISPCLK_calculated = mode_lib->vba.MaxDispclkRoundedToDFSGranularity;
+ } else {
+ mode_lib->vba.DISPCLK_calculated =
+ mode_lib->vba.DISPCLKWithRampingRoundedToDFSGranularity;
+ }
+ DTRACE(" dispclk_mhz (calculated) = %f", mode_lib->vba.DISPCLK_calculated);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.DPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.DPPPerPlane[k]
+ * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100);
+ mode_lib->vba.GlobalDPPCLK = dml_max(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DPPCLK_calculated[k]);
+ }
+ mode_lib->vba.GlobalDPPCLK = RoundToDFSGranularityUp(
+ mode_lib->vba.GlobalDPPCLK,
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
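+ // Snap each per-plane DPPCLK up to the next multiple of GlobalDPPCLK / 255
+ // (presumably because the per-DPP clock is programmed as an 8-bit fraction
+ // of the global DPPCLK).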
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.DPPCLK_calculated[k] = mode_lib->vba.GlobalDPPCLK / 255
+ * dml_ceil(
+ mode_lib->vba.DPPCLK_calculated[k] * 255
+ / mode_lib->vba.GlobalDPPCLK,
+ 1);
+ DTRACE(" dppclk_mhz[%i] (calculated) = %f", k, mode_lib->vba.DPPCLK_calculated[k]);
+ }
+
+ // Urgent Watermark
+ mode_lib->vba.DCCEnabledAnyPlane = false;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ if (mode_lib->vba.DCCEnable[k])
+ mode_lib->vba.DCCEnabledAnyPlane = true;
+
+ mode_lib->vba.ReturnBandwidthToDCN = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
+ mode_lib->vba.FabricAndDRAMBandwidth * 1000)
+ * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency / 100;
+
+ mode_lib->vba.ReturnBW = mode_lib->vba.ReturnBandwidthToDCN;
+ mode_lib->vba.ReturnBW = adjust_ReturnBW(
+ mode_lib,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.DCCEnabledAnyPlane,
+ mode_lib->vba.ReturnBandwidthToDCN);
+
+ // Run the DCC adjustment a second time against the raw (non-derated) return bandwidth
+ mode_lib->vba.ReturnBandwidthToDCN = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLK,
+ mode_lib->vba.FabricAndDRAMBandwidth * 1000);
+ mode_lib->vba.ReturnBW = adjust_ReturnBW(
+ mode_lib,
+ mode_lib->vba.ReturnBW,
+ mode_lib->vba.DCCEnabledAnyPlane,
+ mode_lib->vba.ReturnBandwidthToDCN);
+
+ DTRACE(" dcfclk_mhz = %f", mode_lib->vba.DCFCLK);
+ DTRACE(" return_bw_to_dcn = %f", mode_lib->vba.ReturnBandwidthToDCN);
+ DTRACE(" return_bus_bw = %f", mode_lib->vba.ReturnBW);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz)
+ mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportWidth[k];
+ else
+ mode_lib->vba.SwathWidthSingleDPPY[k] = mode_lib->vba.ViewportHeight[k];
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == true)
+ MainPlaneDoesODMCombine = true;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[j] == true)
+ MainPlaneDoesODMCombine = true;
+
+ if (MainPlaneDoesODMCombine == true)
+ mode_lib->vba.SwathWidthY[k] = dml_min(
+ (double) mode_lib->vba.SwathWidthSingleDPPY[k],
+ dml_round(
+ mode_lib->vba.HActive[k] / 2.0
+ * mode_lib->vba.HRatio[k]));
+ else
+ mode_lib->vba.SwathWidthY[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ / mode_lib->vba.DPPPerPlane[k];
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ mode_lib->vba.BytePerPixelDETY[k] = 8;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ mode_lib->vba.BytePerPixelDETY[k] = 4;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ mode_lib->vba.BytePerPixelDETY[k] = 2;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
+ mode_lib->vba.BytePerPixelDETY[k] = 1;
+ mode_lib->vba.BytePerPixelDETC[k] = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ mode_lib->vba.BytePerPixelDETY[k] = 1;
+ mode_lib->vba.BytePerPixelDETC[k] = 2;
+ } else { // dm_420_10
+ mode_lib->vba.BytePerPixelDETY[k] = 4.0 / 3.0;
+ mode_lib->vba.BytePerPixelDETC[k] = 8.0 / 3.0;
+ }
+ }
+
+ mode_lib->vba.TotalDataReadBandwidth = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.ReadBandwidthPlaneLuma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ mode_lib->vba.ReadBandwidthPlaneChroma[k] = mode_lib->vba.SwathWidthSingleDPPY[k]
+ / 2 * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k] / 2;
+ DTRACE(
+ " read_bw[%i] = %fBps",
+ k,
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]);
+ mode_lib->vba.TotalDataReadBandwidth += mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k];
+ }
+
+ mode_lib->vba.TotalDCCActiveDPP = 0;
+ mode_lib->vba.TotalActiveDPP = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.TotalActiveDPP = mode_lib->vba.TotalActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ if (mode_lib->vba.DCCEnable[k])
+ mode_lib->vba.TotalDCCActiveDPP = mode_lib->vba.TotalDCCActiveDPP
+ + mode_lib->vba.DPPPerPlane[k];
+ }
+
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32) / mode_lib->vba.DCFCLK
+ + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
+ * mode_lib->vba.NumberOfChannels
+ / mode_lib->vba.ReturnBW;
+
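+ // Extra watermark for the last pixel of a line: the worst case, over all
+ // planes, of the data-fabric line delivery time exceeding the display
+ // pipe's own line delivery time (luma and chroma considered separately).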
+ mode_lib->vba.LastPixelOfLineExtraWatermark = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double DataFabricLineDeliveryTimeLuma, DataFabricLineDeliveryTimeChroma;
+
+ if (mode_lib->vba.VRatio[k] <= 1.0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
+ (double) mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ else
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k] =
+ (double) mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+
+ DataFabricLineDeliveryTimeLuma = mode_lib->vba.SwathWidthSingleDPPY[k]
+ * mode_lib->vba.SwathHeightY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1)
+ / (mode_lib->vba.ReturnBW * mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / mode_lib->vba.TotalDataReadBandwidth);
+ mode_lib->vba.LastPixelOfLineExtraWatermark = dml_max(
+ mode_lib->vba.LastPixelOfLineExtraWatermark,
+ DataFabricLineDeliveryTimeLuma
+ - mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k]);
+
+ if (mode_lib->vba.BytePerPixelDETC[k] == 0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] = 0.0;
+ else if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0)
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
+ mode_lib->vba.SwathWidthY[k] / 2.0
+ * mode_lib->vba.DPPPerPlane[k]
+ / (mode_lib->vba.HRatio[k] / 2.0)
+ / mode_lib->vba.PixelClock[k];
+ else
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k] =
+ mode_lib->vba.SwathWidthY[k] / 2.0
+ / mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
+ / mode_lib->vba.DPPCLK[k];
+
+ DataFabricLineDeliveryTimeChroma = mode_lib->vba.SwathWidthSingleDPPY[k] / 2.0
+ * mode_lib->vba.SwathHeightC[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2)
+ / (mode_lib->vba.ReturnBW
+ * mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / mode_lib->vba.TotalDataReadBandwidth);
+ mode_lib->vba.LastPixelOfLineExtraWatermark =
+ dml_max(
+ mode_lib->vba.LastPixelOfLineExtraWatermark,
+ DataFabricLineDeliveryTimeChroma
+ - mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
+ }
+
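+ // Urgent extra latency adds the time to return one pixel chunk per active DPP (plus meta chunks for DCC pipes
+ // and PTE chunks when virtual memory is enabled); the urgent watermark stacks it on the base urgent latency
+ // and the last-pixel-of-line extra.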
+ mode_lib->vba.UrgentExtraLatency = mode_lib->vba.UrgentRoundTripAndOutOfOrderLatency
+ + (mode_lib->vba.TotalActiveDPP * mode_lib->vba.PixelChunkSizeInKByte
+ + mode_lib->vba.TotalDCCActiveDPP
+ * mode_lib->vba.MetaChunkSize) * 1024.0
+ / mode_lib->vba.ReturnBW;
+
+ if (mode_lib->vba.VirtualMemoryEnable)
+ mode_lib->vba.UrgentExtraLatency += mode_lib->vba.TotalActiveDPP
+ * mode_lib->vba.PTEChunkSize * 1024.0 / mode_lib->vba.ReturnBW;
+
+ mode_lib->vba.UrgentWatermark = mode_lib->vba.UrgentLatency
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency;
+
+ DTRACE(" urgent_extra_latency = %fus", mode_lib->vba.UrgentExtraLatency);
+ DTRACE(" wm_urgent = %fus", mode_lib->vba.UrgentWatermark);
+
+ mode_lib->vba.MemoryTripWatermark = mode_lib->vba.UrgentLatency;
+
+ mode_lib->vba.TotalActiveWriteback = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.WritebackEnable[k])
+ mode_lib->vba.TotalActiveWriteback = mode_lib->vba.TotalActiveWriteback + 1;
+ }
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1)
+ mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency;
+ else
+ mode_lib->vba.WritebackUrgentWatermark = mode_lib->vba.WritebackLatency
+ + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
+ / mode_lib->vba.SOCCLK;
+
+ DTRACE(" wm_wb_urgent = %fus", mode_lib->vba.WritebackUrgentWatermark);
+
+ // NB P-State/DRAM Clock Change Watermark
+ mode_lib->vba.DRAMClockChangeWatermark = mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.UrgentWatermark;
+
+ DTRACE(" wm_pstate_change = %fus", mode_lib->vba.DRAMClockChangeWatermark);
+
+ DTRACE(" calculating wb pstate watermark");
+ DTRACE(" total wb outputs %d", mode_lib->vba.TotalActiveWriteback);
+ DTRACE(" socclk frequency %f Mhz", mode_lib->vba.SOCCLK);
+
+ if (mode_lib->vba.TotalActiveWriteback <= 1)
+ mode_lib->vba.WritebackDRAMClockChangeWatermark =
+ mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.WritebackLatency;
+ else
+ mode_lib->vba.WritebackDRAMClockChangeWatermark =
+ mode_lib->vba.DRAMClockChangeLatency
+ + mode_lib->vba.WritebackLatency
+ + mode_lib->vba.WritebackChunkSize * 1024.0 / 32
+ / mode_lib->vba.SOCCLK;
+
+ DTRACE(" wm_wb_pstate %fus", mode_lib->vba.WritebackDRAMClockChangeWatermark);
+
+ // Stutter Efficiency
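+ // Whole swaths that fit in the DET and how long they can feed the pipe at the current VRatio.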
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.LinesInDETY[k] = mode_lib->vba.DETBufferSizeY[k]
+ / mode_lib->vba.BytePerPixelDETY[k] / mode_lib->vba.SwathWidthY[k];
+ mode_lib->vba.LinesInDETYRoundedDownToSwath[k] = dml_floor(
+ mode_lib->vba.LinesInDETY[k],
+ mode_lib->vba.SwathHeightY[k]);
+ mode_lib->vba.FullDETBufferingTimeY[k] =
+ mode_lib->vba.LinesInDETYRoundedDownToSwath[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k];
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.LinesInDETC[k] = mode_lib->vba.DETBufferSizeC[k]
+ / mode_lib->vba.BytePerPixelDETC[k]
+ / (mode_lib->vba.SwathWidthY[k] / 2);
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = dml_floor(
+ mode_lib->vba.LinesInDETC[k],
+ mode_lib->vba.SwathHeightC[k]);
+ mode_lib->vba.FullDETBufferingTimeC[k] =
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k] / 2);
+ } else {
+ mode_lib->vba.LinesInDETC[k] = 0;
+ mode_lib->vba.LinesInDETCRoundedDownToSwath[k] = 0;
+ mode_lib->vba.FullDETBufferingTimeC[k] = 999999;
+ }
+ }
+
+ mode_lib->vba.MinFullDETBufferingTime = 999999.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.FullDETBufferingTimeY[k]
+ < mode_lib->vba.MinFullDETBufferingTime) {
+ mode_lib->vba.MinFullDETBufferingTime =
+ mode_lib->vba.FullDETBufferingTimeY[k];
+ mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
+ (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ }
+ if (mode_lib->vba.FullDETBufferingTimeC[k]
+ < mode_lib->vba.MinFullDETBufferingTime) {
+ mode_lib->vba.MinFullDETBufferingTime =
+ mode_lib->vba.FullDETBufferingTimeC[k];
+ mode_lib->vba.FrameTimeForMinFullDETBufferingTime =
+ (double) mode_lib->vba.VTotal[k] * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ }
+ }
+
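+ // Average DRAM read bandwidth in GB/s: plane traffic divided by the DCC rate when compression is on; the extra
+ // 1/256 and 1/512 terms presumably account for DCC metadata and PTE traffic.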
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond = 0.0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.DCCEnable[k]) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / mode_lib->vba.DCCRate[k]
+ / 1000
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / mode_lib->vba.DCCRate[k]
+ / 1000;
+ } else {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000;
+ }
+ if (mode_lib->vba.DCCEnable[k]) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000 / 256
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000 / 256;
+ }
+ if (mode_lib->vba.VirtualMemoryEnable) {
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond =
+ mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ + mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ / 1000 / 512
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ / 1000 / 512;
+ }
+ }
+
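+ // Stutter burst: split the DET refill into the part that fits in the ROB and the remainder, compute how long
+ // the burst takes, and derive the efficiency with and without counting the smallest VBlank.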
+ mode_lib->vba.PartOfBurstThatFitsInROB =
+ dml_min(
+ mode_lib->vba.MinFullDETBufferingTime
+ * mode_lib->vba.TotalDataReadBandwidth,
+ mode_lib->vba.ROBBufferSizeInKByte * 1024
+ * mode_lib->vba.TotalDataReadBandwidth
+ / (mode_lib->vba.AverageReadBandwidthGBytePerSecond
+ * 1000));
+ mode_lib->vba.StutterBurstTime = mode_lib->vba.PartOfBurstThatFitsInROB
+ * (mode_lib->vba.AverageReadBandwidthGBytePerSecond * 1000)
+ / mode_lib->vba.TotalDataReadBandwidth / mode_lib->vba.ReturnBW
+ + (mode_lib->vba.MinFullDETBufferingTime
+ * mode_lib->vba.TotalDataReadBandwidth
+ - mode_lib->vba.PartOfBurstThatFitsInROB)
+ / (mode_lib->vba.DCFCLK * 64);
+ if (mode_lib->vba.TotalActiveWriteback == 0) {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = (1
+ - (mode_lib->vba.SRExitTime + mode_lib->vba.StutterBurstTime)
+ / mode_lib->vba.MinFullDETBufferingTime) * 100;
+ } else {
+ mode_lib->vba.StutterEfficiencyNotIncludingVBlank = 0;
+ }
+
+ mode_lib->vba.SmallestVBlank = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ mode_lib->vba.VBlankTime = (double) (mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k]) * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.VBlankTime = 0;
+ }
+ mode_lib->vba.SmallestVBlank = dml_min(
+ mode_lib->vba.SmallestVBlank,
+ mode_lib->vba.VBlankTime);
+ }
+
+ mode_lib->vba.StutterEfficiency = (mode_lib->vba.StutterEfficiencyNotIncludingVBlank / 100
+ * (mode_lib->vba.FrameTimeForMinFullDETBufferingTime
+ - mode_lib->vba.SmallestVBlank)
+ + mode_lib->vba.SmallestVBlank)
+ / mode_lib->vba.FrameTimeForMinFullDETBufferingTime * 100;
+
+ // DCFCLK Deep Sleep
+ mode_lib->vba.DCFClkDeepSleep = 8.0;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) {
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.DCFCLKDeepSleepPerPlane =
+ dml_max(
+ 1.1 * mode_lib->vba.SwathWidthY[k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelDETY[k],
+ 1) / 32
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k],
+ 1.1 * mode_lib->vba.SwathWidthY[k] / 2.0
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelDETC[k],
+ 2) / 32
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeChroma[k]);
+ } else
+ mode_lib->vba.DCFCLKDeepSleepPerPlane = 1.1 * mode_lib->vba.SwathWidthY[k]
+ * dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1) / 64.0
+ / mode_lib->vba.DisplayPipeLineDeliveryTimeLuma[k];
+ mode_lib->vba.DCFCLKDeepSleepPerPlane = dml_max(
+ mode_lib->vba.DCFCLKDeepSleepPerPlane,
+ mode_lib->vba.PixelClock[k] / 16.0);
+ mode_lib->vba.DCFClkDeepSleep = dml_max(
+ mode_lib->vba.DCFClkDeepSleep,
+ mode_lib->vba.DCFCLKDeepSleepPerPlane);
+
+ DTRACE(
+ " dcfclk_deepsleep_per_plane[%i] = %fMHz",
+ k,
+ mode_lib->vba.DCFCLKDeepSleepPerPlane);
+ }
+
+ DTRACE(" dcfclk_deepsleep_mhz = %fMHz", mode_lib->vba.DCFClkDeepSleep);
+
+ // Stutter Watermark
+ mode_lib->vba.StutterExitWatermark = mode_lib->vba.SRExitTime
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency + 10 / mode_lib->vba.DCFClkDeepSleep;
+ mode_lib->vba.StutterEnterPlusExitWatermark = mode_lib->vba.SREnterPlusExitTime
+ + mode_lib->vba.LastPixelOfLineExtraWatermark
+ + mode_lib->vba.UrgentExtraLatency;
+
+ DTRACE(" wm_cstate_exit = %fus", mode_lib->vba.StutterExitWatermark);
+ DTRACE(" wm_cstate_enter_exit = %fus", mode_lib->vba.StutterEnterPlusExitWatermark);
+
+ // Urgent Latency Supported
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.EffectiveDETPlusLBLinesLuma =
+ dml_floor(
+ mode_lib->vba.LinesInDETY[k]
+ + dml_min(
+ mode_lib->vba.LinesInDETY[k]
+ * mode_lib->vba.DPPCLK[k]
+ * mode_lib->vba.BytePerPixelDETY[k]
+ * mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]),
+ (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
+ mode_lib->vba.SwathHeightY[k]);
+
+ mode_lib->vba.UrgentLatencySupportUsLuma = mode_lib->vba.EffectiveDETPlusLBLinesLuma
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETPlusLBLinesLuma
+ * mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.BytePerPixelDETY[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]);
+
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ mode_lib->vba.EffectiveDETPlusLBLinesChroma =
+ dml_floor(
+ mode_lib->vba.LinesInDETC[k]
+ + dml_min(
+ mode_lib->vba.LinesInDETC[k]
+ * mode_lib->vba.DPPCLK[k]
+ * mode_lib->vba.BytePerPixelDETC[k]
+ * mode_lib->vba.PSCL_THROUGHPUT_CHROMA[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]),
+ (double) mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
+ mode_lib->vba.SwathHeightC[k]);
+ mode_lib->vba.UrgentLatencySupportUsChroma =
+ mode_lib->vba.EffectiveDETPlusLBLinesChroma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k] / 2)
+ - mode_lib->vba.EffectiveDETPlusLBLinesChroma
+ * (mode_lib->vba.SwathWidthY[k]
+ / 2)
+ * mode_lib->vba.BytePerPixelDETC[k]
+ / (mode_lib->vba.ReturnBW
+ / mode_lib->vba.DPPPerPlane[k]);
+ mode_lib->vba.UrgentLatencySupportUs[k] = dml_min(
+ mode_lib->vba.UrgentLatencySupportUsLuma,
+ mode_lib->vba.UrgentLatencySupportUsChroma);
+ } else {
+ mode_lib->vba.UrgentLatencySupportUs[k] =
+ mode_lib->vba.UrgentLatencySupportUsLuma;
+ }
+ }
+
+ mode_lib->vba.MinUrgentLatencySupportUs = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.MinUrgentLatencySupportUs = dml_min(
+ mode_lib->vba.MinUrgentLatencySupportUs,
+ mode_lib->vba.UrgentLatencySupportUs[k]);
+ }
+
+ // Non-Urgent Latency Tolerance
+ mode_lib->vba.NonUrgentLatencyTolerance = mode_lib->vba.MinUrgentLatencySupportUs
+ - mode_lib->vba.UrgentWatermark;
+
+ // DSCCLK
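+ // DSCCLK is the back-end pixel rate divided by 3 (6 with ODM combine) and by the format factor
+ // (2 for 4:2:0 and native 4:2:2 output), with down-spread margin applied.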
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if ((mode_lib->vba.BlendingAndTiming[k] != k) || !mode_lib->vba.DSCEnabled[k]) {
+ mode_lib->vba.DSCCLK_calculated[k] = 0.0;
+ } else {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k] == dm_n422)
+ mode_lib->vba.DSCFormatFactor = 2;
+ else
+ mode_lib->vba.DSCFormatFactor = 1;
+ if (mode_lib->vba.ODMCombineEnabled[k])
+ mode_lib->vba.DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 6
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ else
+ mode_lib->vba.DSCCLK_calculated[k] =
+ mode_lib->vba.PixelClockBackEnd[k] / 3
+ / mode_lib->vba.DSCFormatFactor
+ / (1
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100);
+ }
+ }
+
+ // DSC Delay
+ // TODO
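+ // DSC delay: the DSCC per-slice delay plus the fixed DSC delay, doubled (with half the slices per side) under
+ // ODM combine, then rescaled from the back-end to the front-end pixel clock.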
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double bpp = mode_lib->vba.OutputBpp[k];
+ unsigned int slices = mode_lib->vba.NumberOfDSCSlices[k];
+
+ if (mode_lib->vba.DSCEnabled[k] && bpp != 0) {
+ if (!mode_lib->vba.ODMCombineEnabled[k]) {
+ mode_lib->vba.DSCDelay[k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ mode_lib->vba.DSCDelay[k] =
+ 2
+ * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ bpp,
+ dml_ceil(
+ (double) mode_lib->vba.HActive[k]
+ / mode_lib->vba.NumberOfDSCSlices[k],
+ 1),
+ slices / 2.0,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]));
+ }
+ mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ mode_lib->vba.DSCDelay[k] = 0;
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) // NumberOfPlanes
+ if (j != k && mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.DSCEnabled[j])
+ mode_lib->vba.DSCDelay[k] = mode_lib->vba.DSCDelay[j];
+
+ // Prefetch
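+ // Gather per-plane VM, DCC meta and PTE row byte counts plus prefetch source lines; these feed
+ // CalculatePrefetchSchedule below.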
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ unsigned int PDEAndMetaPTEBytesFrameY;
+ unsigned int PixelPTEBytesPerRowY;
+ unsigned int MetaRowByteY;
+ unsigned int MetaRowByteC;
+ unsigned int PDEAndMetaPTEBytesFrameC;
+ unsigned int PixelPTEBytesPerRowC;
+
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ dml_ceil(mode_lib->vba.BytePerPixelDETC[k], 2),
+ &mode_lib->vba.BlockHeight256BytesY[k],
+ &mode_lib->vba.BlockHeight256BytesC[k],
+ &mode_lib->vba.BlockWidth256BytesY[k],
+ &mode_lib->vba.BlockWidth256BytesC[k]);
+ PDEAndMetaPTEBytesFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.BlockHeight256BytesY[k],
+ mode_lib->vba.BlockWidth256BytesY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ mode_lib->vba.SwathWidthY[k],
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &MetaRowByteY,
+ &PixelPTEBytesPerRowY,
+ &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
+ &mode_lib->vba.dpte_row_height[k],
+ &mode_lib->vba.meta_row_height[k]);
+ mode_lib->vba.PrefetchSourceLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.ViewportYStartY[k],
+ &mode_lib->vba.VInitPreFillY[k],
+ &mode_lib->vba.MaxNumSwathY[k]);
+
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_8)) {
+ PDEAndMetaPTEBytesFrameC =
+ CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.BlockHeight256BytesC[k],
+ mode_lib->vba.BlockWidth256BytesC[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelDETC[k],
+ 2),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2,
+ mode_lib->vba.ViewportHeight[k] / 2,
+ mode_lib->vba.SwathWidthY[k] / 2,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchC[k],
+ 0,
+ &mode_lib->vba.MacroTileWidthC[k],
+ &MetaRowByteC,
+ &PixelPTEBytesPerRowC,
+ &mode_lib->vba.PTEBufferSizeNotExceeded[mode_lib->vba.VoltageLevel],
+ &mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_height_chroma[k]);
+ mode_lib->vba.PrefetchSourceLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k] / 2,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightC[k],
+ mode_lib->vba.ViewportYStartC[k],
+ &mode_lib->vba.VInitPreFillC[k],
+ &mode_lib->vba.MaxNumSwathC[k]);
+ } else {
+ PixelPTEBytesPerRowC = 0;
+ PDEAndMetaPTEBytesFrameC = 0;
+ MetaRowByteC = 0;
+ mode_lib->vba.MaxNumSwathC[k] = 0;
+ mode_lib->vba.PrefetchSourceLinesC[k] = 0;
+ }
+
+ mode_lib->vba.PixelPTEBytesPerRow[k] = PixelPTEBytesPerRowY + PixelPTEBytesPerRowC;
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k] = PDEAndMetaPTEBytesFrameY
+ + PDEAndMetaPTEBytesFrameC;
+ mode_lib->vba.MetaRowByte[k] = MetaRowByteY + MetaRowByteC;
+
+ CalculateActiveRowBandwidth(
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ MetaRowByteY,
+ MetaRowByteC,
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.meta_row_height_chroma[k],
+ PixelPTEBytesPerRowY,
+ PixelPTEBytesPerRowC,
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_bw[k],
+ &mode_lib->vba.dpte_row_bw[k],
+ &mode_lib->vba.qual_row_bw[k]);
+ }
+
+ mode_lib->vba.TCalc = 24.0 / mode_lib->vba.DCFClkDeepSleep;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k])
+ / mode_lib->vba.DISPCLK;
+ } else
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] = 0;
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[j] == k
+ && mode_lib->vba.WritebackEnable[j] == true) {
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ dml_max(
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k],
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[j],
+ mode_lib->vba.WritebackHRatio[j],
+ mode_lib->vba.WritebackVRatio[j],
+ mode_lib->vba.WritebackLumaHTaps[j],
+ mode_lib->vba.WritebackLumaVTaps[j],
+ mode_lib->vba.WritebackChromaHTaps[j],
+ mode_lib->vba.WritebackChromaVTaps[j],
+ mode_lib->vba.WritebackDestinationWidth[j])
+ / mode_lib->vba.DISPCLK);
+ }
+ }
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j)
+ if (mode_lib->vba.BlendingAndTiming[k] == j)
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k] =
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][j];
+
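+ // Maximum VSTARTUP per plane: the vertical blank minus the (rounded-up) writeback delay in line times.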
+ mode_lib->vba.VStartupLines = 13;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.MaxVStartupLines[k] =
+ mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ - dml_max(
+ 1.0,
+ dml_ceil(
+ mode_lib->vba.WritebackDelay[mode_lib->vba.VoltageLevel][k]
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1));
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k)
+ mode_lib->vba.MaximumMaxVStartupLines = dml_max(
+ mode_lib->vba.MaximumMaxVStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.cursor_bw[k] = 0.0;
+ for (j = 0; j < mode_lib->vba.NumberOfCursors[k]; ++j)
+ mode_lib->vba.cursor_bw[k] += mode_lib->vba.CursorWidth[k][j]
+ * mode_lib->vba.CursorBPP[k][j] / 8.0
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ }
+
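+ // Raise VStartupLines one line at a time (starting from 13) until the prefetch schedule fits in the return
+ // bandwidth (and immediate flips fit, when required) or the largest supported VSTARTUP is exceeded.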
+ do {
+ double MaxTotalRDBandwidth = 0;
+ bool DestinationLineTimesForPrefetchLessThan2 = false;
+ bool VRatioPrefetchMoreThan4 = false;
+ bool prefetch_vm_bw_valid = true;
+ bool prefetch_row_bw_valid = true;
+ double TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthY[k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelDETY[k],
+ 1),
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0;
+ }
+ mode_lib->vba.ErrorResult[k] =
+ CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.DPPCLK[k],
+ mode_lib->vba.DISPCLK,
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.DCFClkDeepSleep,
+ mode_lib->vba.DSCDelay[k],
+ mode_lib->vba.DPPPerPlane[k],
+ mode_lib->vba.ScalerEnabled[k],
+ mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ (unsigned int) (mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.HRatio[k]),
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]),
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.TCalc,
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
+ mode_lib->vba.MetaRowByte[k],
+ mode_lib->vba.PixelPTEBytesPerRow[k],
+ mode_lib->vba.PrefetchSourceLinesY[k],
+ mode_lib->vba.SwathWidthY[k],
+ mode_lib->vba.BytePerPixelDETY[k],
+ mode_lib->vba.VInitPreFillY[k],
+ mode_lib->vba.MaxNumSwathY[k],
+ mode_lib->vba.PrefetchSourceLinesC[k],
+ mode_lib->vba.BytePerPixelDETC[k],
+ mode_lib->vba.VInitPreFillC[k],
+ mode_lib->vba.MaxNumSwathC[k],
+ mode_lib->vba.SwathHeightY[k],
+ mode_lib->vba.SwathHeightC[k],
+ TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ &mode_lib->vba.DSTXAfterScaler[k],
+ &mode_lib->vba.DSTYAfterScaler[k],
+ &mode_lib->vba.DestinationLinesForPrefetch[k],
+ &mode_lib->vba.PrefetchBandwidth[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInVBlank[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInVBlank[k],
+ &mode_lib->vba.VRatioPrefetchY[k],
+ &mode_lib->vba.VRatioPrefetchC[k],
+ &mode_lib->vba.RequiredPrefetchPixDataBW[k],
+ &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &mode_lib->vba.Tno_bw[k],
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.VStartup[k] = dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[k]);
+ if (mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata
+ != 0) {
+ mode_lib->vba.VStartup[k] =
+ mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
+ }
+ } else {
+ mode_lib->vba.VStartup[k] =
+ dml_min(
+ mode_lib->vba.VStartupLines,
+ mode_lib->vba.MaxVStartupLines[mode_lib->vba.BlendingAndTiming[k]]);
+ }
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+
+ if (mode_lib->vba.PDEAndMetaPTEBytesFrame[k] == 0)
+ mode_lib->vba.prefetch_vm_bw[k] = 0;
+ else if (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k] > 0) {
+ mode_lib->vba.prefetch_vm_bw[k] =
+ (double) mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ / (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_vm_bw[k] = 0;
+ prefetch_vm_bw_valid = false;
+ }
+ if (mode_lib->vba.MetaRowByte[k] + mode_lib->vba.PixelPTEBytesPerRow[k]
+ == 0)
+ mode_lib->vba.prefetch_row_bw[k] = 0;
+ else if (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k] > 0) {
+ mode_lib->vba.prefetch_row_bw[k] =
+ (double) (mode_lib->vba.MetaRowByte[k]
+ + mode_lib->vba.PixelPTEBytesPerRow[k])
+ / (mode_lib->vba.DestinationLinesToRequestRowInVBlank[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_row_bw[k] = 0;
+ prefetch_row_bw_valid = false;
+ }
+
+ MaxTotalRDBandwidth =
+ MaxTotalRDBandwidth + mode_lib->vba.cursor_bw[k]
+ + dml_max(
+ mode_lib->vba.prefetch_vm_bw[k],
+ dml_max(
+ mode_lib->vba.prefetch_row_bw[k],
+ dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k],
+ mode_lib->vba.RequiredPrefetchPixDataBW[k])
+ + mode_lib->vba.meta_row_bw[k]
+ + mode_lib->vba.dpte_row_bw[k]));
+
+ if (mode_lib->vba.DestinationLinesForPrefetch[k] < 2)
+ DestinationLineTimesForPrefetchLessThan2 = true;
+ if (mode_lib->vba.VRatioPrefetchY[k] > 4
+ || mode_lib->vba.VRatioPrefetchC[k] > 4)
+ VRatioPrefetchMoreThan4 = true;
+ }
+
+ if (MaxTotalRDBandwidth <= mode_lib->vba.ReturnBW && prefetch_vm_bw_valid
+ && prefetch_row_bw_valid && !VRatioPrefetchMoreThan4
+ && !DestinationLineTimesForPrefetchLessThan2)
+ mode_lib->vba.PrefetchModeSupported = true;
+ else {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Bandwidth violation. Results are NOT valid\n");
+ }
+
+ if (mode_lib->vba.PrefetchModeSupported == true) {
+ double final_flip_bw[DC__NUM_DPP__MAX];
+ unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
+ double total_dcn_read_bw_with_flip = 0;
+
+ mode_lib->vba.BandwidthAvailableForImmediateFlip = mode_lib->vba.ReturnBW;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - mode_lib->vba.cursor_bw[k]
+ - dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k]
+ + mode_lib->vba.qual_row_bw[k],
+ mode_lib->vba.PrefetchBandwidth[k]);
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ ImmediateFlipBytes[k] = 0;
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ ImmediateFlipBytes[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k]
+ + mode_lib->vba.MetaRowByte[k]
+ + mode_lib->vba.PixelPTEBytesPerRow[k];
+ }
+ }
+ mode_lib->vba.TotImmediateFlipBytes = 0;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.TotImmediateFlipBytes =
+ mode_lib->vba.TotImmediateFlipBytes
+ + ImmediateFlipBytes[k];
+ }
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.UrgentExtraLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ ImmediateFlipBytes[k],
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.Tno_bw[k],
+ mode_lib->vba.PDEAndMetaPTEBytesFrame[k],
+ mode_lib->vba.MetaRowByte[k],
+ mode_lib->vba.PixelPTEBytesPerRow[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.qual_row_bw[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
+ &final_flip_bw[k],
+ &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ total_dcn_read_bw_with_flip =
+ total_dcn_read_bw_with_flip
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max(
+ mode_lib->vba.prefetch_vm_bw[k],
+ dml_max(
+ mode_lib->vba.prefetch_row_bw[k],
+ final_flip_bw[k]
+ + dml_max(
+ mode_lib->vba.ReadBandwidthPlaneLuma[k]
+ + mode_lib->vba.ReadBandwidthPlaneChroma[k],
+ mode_lib->vba.RequiredPrefetchPixDataBW[k])));
+ }
+ mode_lib->vba.ImmediateFlipSupported = true;
+ if (total_dcn_read_bw_with_flip > mode_lib->vba.ReturnBW) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+ }
+ } else {
+ mode_lib->vba.ImmediateFlipSupported = false;
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ErrorResult[k]) {
+ mode_lib->vba.PrefetchModeSupported = false;
+ dml_print(
+ "DML: CalculatePrefetchSchedule ***failed***. Prefetch schedule violation. Results are NOT valid\n");
+ }
+ }
+
+ mode_lib->vba.VStartupLines = mode_lib->vba.VStartupLines + 1;
+ } while (!((mode_lib->vba.PrefetchModeSupported
+ && (!mode_lib->vba.ImmediateFlipSupport
+ || mode_lib->vba.ImmediateFlipSupported))
+ || mode_lib->vba.MaximumMaxVStartupLines < mode_lib->vba.VStartupLines));
+
+ // Display Pipeline Delivery Time in Prefetch
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.VRatioPrefetchY[k] <= 1) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k] * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeLumaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+ }
+ if (mode_lib->vba.BytePerPixelDETC[k] == 0) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = 0;
+ } else {
+ if (mode_lib->vba.VRatioPrefetchC[k] <= 1) {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ * mode_lib->vba.DPPPerPlane[k]
+ / mode_lib->vba.HRatio[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.DisplayPipeLineDeliveryTimeChromaPrefetch[k] =
+ mode_lib->vba.SwathWidthY[k]
+ / mode_lib->vba.PSCL_THROUGHPUT_LUMA[k]
+ / mode_lib->vba.DPPCLK[k];
+ }
+ }
+ }
+
+ // Min TTUVBlank
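+ // PrefetchMode 0 allows both DRAM clock change and self refresh during VBlank, mode 1 only self refresh,
+ // anything else neither; MinTTUVBlank is the matching worst-case watermark, plus TCalc when dynamic metadata
+ // is disabled.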
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.PrefetchMode == 0) {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = true;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ mode_lib->vba.MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.DRAMClockChangeWatermark,
+ dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark));
+ } else if (mode_lib->vba.PrefetchMode == 1) {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = true;
+ mode_lib->vba.MinTTUVBlank[k] = dml_max(
+ mode_lib->vba.StutterEnterPlusExitWatermark,
+ mode_lib->vba.UrgentWatermark);
+ } else {
+ mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k] = false;
+ mode_lib->vba.AllowDRAMSelfRefreshDuringVBlank[k] = false;
+ mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.UrgentWatermark;
+ }
+ if (!mode_lib->vba.DynamicMetadataEnable[k])
+ mode_lib->vba.MinTTUVBlank[k] = mode_lib->vba.TCalc
+ + mode_lib->vba.MinTTUVBlank[k];
+ }
+
+ // DCC Configuration
+ mode_lib->vba.ActiveDPPs = 0;
+ // NB P-State/DRAM Clock Change Support
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.ActiveDPPs = mode_lib->vba.ActiveDPPs + mode_lib->vba.DPPPerPlane[k];
+ }
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ double EffectiveLBLatencyHidingY;
+ double EffectiveLBLatencyHidingC;
+ double DPPOutputBufferLinesY;
+ double DPPOutputBufferLinesC;
+ double DPPOPPBufferingY;
+ double MaxDETBufferingTimeY;
+ double ActiveDRAMClockChangeLatencyMarginY;
+
+ mode_lib->vba.LBLatencyHidingSourceLinesY =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ (unsigned int) dml_floor(
+ (double) mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthY[k]
+ / dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)),
+ 1)) - (mode_lib->vba.vtaps[k] - 1);
+
+ mode_lib->vba.LBLatencyHidingSourceLinesC =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ (unsigned int) dml_floor(
+ (double) mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthY[k]
+ / 2.0
+ / dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2,
+ 1.0)),
+ 1))
+ - (mode_lib->vba.VTAPsChroma[k] - 1);
+
+ EffectiveLBLatencyHidingY = mode_lib->vba.LBLatencyHidingSourceLinesY
+ / mode_lib->vba.VRatio[k]
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+
+ EffectiveLBLatencyHidingC = mode_lib->vba.LBLatencyHidingSourceLinesC
+ / (mode_lib->vba.VRatio[k] / 2)
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+
+ if (mode_lib->vba.SwathWidthY[k] > 2 * mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = mode_lib->vba.DPPOutputBufferPixels
+ / mode_lib->vba.SwathWidthY[k];
+ } else if (mode_lib->vba.SwathWidthY[k] > mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesY = 0.5;
+ } else {
+ DPPOutputBufferLinesY = 1;
+ }
+
+ if (mode_lib->vba.SwathWidthY[k] / 2 > 2 * mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = mode_lib->vba.DPPOutputBufferPixels
+ / (mode_lib->vba.SwathWidthY[k] / 2);
+ } else if (mode_lib->vba.SwathWidthY[k] / 2 > mode_lib->vba.DPPOutputBufferPixels) {
+ DPPOutputBufferLinesC = 0.5;
+ } else {
+ DPPOutputBufferLinesC = 1;
+ }
+
+ DPPOPPBufferingY = (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * (DPPOutputBufferLinesY + mode_lib->vba.OPPOutputBufferLines);
+ MaxDETBufferingTimeY = mode_lib->vba.FullDETBufferingTimeY[k]
+ + (mode_lib->vba.LinesInDETY[k]
+ - mode_lib->vba.LinesInDETYRoundedDownToSwath[k])
+ / mode_lib->vba.SwathHeightY[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+
+ ActiveDRAMClockChangeLatencyMarginY = DPPOPPBufferingY + EffectiveLBLatencyHidingY
+ + MaxDETBufferingTimeY - mode_lib->vba.DRAMClockChangeWatermark;
+
+ if (mode_lib->vba.ActiveDPPs > 1) {
+ ActiveDRAMClockChangeLatencyMarginY =
+ ActiveDRAMClockChangeLatencyMarginY
+ - (1 - 1 / (mode_lib->vba.ActiveDPPs - 1))
+ * mode_lib->vba.SwathHeightY[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ }
+
+ if (mode_lib->vba.BytePerPixelDETC[k] > 0) {
+ double DPPOPPBufferingC = (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ * (DPPOutputBufferLinesC
+ + mode_lib->vba.OPPOutputBufferLines);
+ double MaxDETBufferingTimeC =
+ mode_lib->vba.FullDETBufferingTimeC[k]
+ + (mode_lib->vba.LinesInDETC[k]
+ - mode_lib->vba.LinesInDETCRoundedDownToSwath[k])
+ / mode_lib->vba.SwathHeightC[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ double ActiveDRAMClockChangeLatencyMarginC = DPPOPPBufferingC
+ + EffectiveLBLatencyHidingC + MaxDETBufferingTimeC
+ - mode_lib->vba.DRAMClockChangeWatermark;
+
+ if (mode_lib->vba.ActiveDPPs > 1) {
+ ActiveDRAMClockChangeLatencyMarginC =
+ ActiveDRAMClockChangeLatencyMarginC
+ - (1
+ - 1
+ / (mode_lib->vba.ActiveDPPs
+ - 1))
+ * mode_lib->vba.SwathHeightC[k]
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ ActiveDRAMClockChangeLatencyMarginY,
+ ActiveDRAMClockChangeLatencyMarginC);
+ } else {
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] =
+ ActiveDRAMClockChangeLatencyMarginY;
+ }
+
+ if (mode_lib->vba.WritebackEnable[k]) {
+ double WritebackDRAMClockChangeLatencyMargin;
+
+ if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ WritebackDRAMClockChangeLatencyMargin =
+ (double) (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ + mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ * 4)
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ } else if (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ WritebackDRAMClockChangeLatencyMargin =
+ dml_min(
+ (double) mode_lib->vba.WritebackInterfaceLumaBufferSize
+ * 8.0 / 10,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize
+ * 8 / 10)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]))
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ } else {
+ WritebackDRAMClockChangeLatencyMargin =
+ dml_min(
+ (double) mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / (mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]))
+ - mode_lib->vba.WritebackDRAMClockChangeWatermark;
+ }
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k] = dml_min(
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k],
+ WritebackDRAMClockChangeLatencyMargin);
+ }
+ }
+
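+ // The smallest per-plane margin decides whether DRAM clock changes are supported during VActive, only during
+ // (synchronized or single-plane) VBlank, or not at all.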
+ mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+ < mode_lib->vba.MinActiveDRAMClockChangeMargin) {
+ mode_lib->vba.MinActiveDRAMClockChangeMargin =
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ }
+ }
+
+ mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
+ mode_lib->vba.MinActiveDRAMClockChangeMargin
+ + mode_lib->vba.DRAMClockChangeLatency;
+
+ if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+ mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vactive;
+ } else {
+ if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_vblank;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
+ mode_lib->vba.DRAMClockChangeSupport =
+ dm_dram_clock_change_unsupported;
+ }
+ }
+ } else {
+ mode_lib->vba.DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
+ }
+ }
+
+ // XFC Parameters:
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ double TWait;
+
+ mode_lib->vba.XFCSlaveVUpdateOffset[k] = mode_lib->vba.XFCTSlvVupdateOffset;
+ mode_lib->vba.XFCSlaveVupdateWidth[k] = mode_lib->vba.XFCTSlvVupdateWidth;
+ mode_lib->vba.XFCSlaveVReadyOffset[k] = mode_lib->vba.XFCTSlvVreadyOffset;
+ TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthY[k],
+ dml_ceil(mode_lib->vba.BytePerPixelDETY[k], 1),
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TCalc,
+ TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] =
+ dml_floor(
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.XFCTransferDelay[k] =
+ dml_ceil(
+ mode_lib->vba.XFCBusTransportTime
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.XFCPrechargeDelay[k] =
+ dml_ceil(
+ (mode_lib->vba.XFCBusTransportTime
+ + mode_lib->vba.TInitXFill
+ + mode_lib->vba.TslvChk)
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1);
+ mode_lib->vba.InitFillLevel = mode_lib->vba.XFCXBUFLatencyTolerance
+ * mode_lib->vba.SrcActiveDrainRate;
+ mode_lib->vba.FinalFillMargin =
+ (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.XFCFillConstant;
+ mode_lib->vba.FinalFillLevel = mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ * mode_lib->vba.SrcActiveDrainRate
+ + mode_lib->vba.FinalFillMargin;
+ mode_lib->vba.RemainingFillLevel = dml_max(
+ 0.0,
+ mode_lib->vba.FinalFillLevel - mode_lib->vba.InitFillLevel);
+ mode_lib->vba.TFinalxFill = mode_lib->vba.RemainingFillLevel
+ / (mode_lib->vba.SrcActiveDrainRate
+ * mode_lib->vba.XFCFillBWOverhead / 100);
+ mode_lib->vba.XFCPrefetchMargin[k] =
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay
+ + mode_lib->vba.TFinalxFill
+ + (mode_lib->vba.DestinationLinesToRequestVMInVBlank[k]
+ + mode_lib->vba.DestinationLinesToRequestRowInVBlank[k])
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k];
+ } else {
+ mode_lib->vba.XFCSlaveVUpdateOffset[k] = 0;
+ mode_lib->vba.XFCSlaveVupdateWidth[k] = 0;
+ mode_lib->vba.XFCSlaveVReadyOffset[k] = 0;
+ mode_lib->vba.XFCRemoteSurfaceFlipLatency[k] = 0;
+ mode_lib->vba.XFCPrechargeDelay[k] = 0;
+ mode_lib->vba.XFCTransferDelay[k] = 0;
+ mode_lib->vba.XFCPrefetchMargin[k] = 0;
+ }
+ }
+}
+
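+ // Chooses per-plane swath heights and the luma/chroma DET buffer split from pixel format, tiling, scan
+ // direction and ODM combine.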
+static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib)
+{
+ double BytePerPixDETY;
+ double BytePerPixDETC;
+ double Read256BytesBlockHeightY;
+ double Read256BytesBlockHeightC;
+ double Read256BytesBlockWidthY;
+ double Read256BytesBlockWidthC;
+ double MaximumSwathHeightY;
+ double MaximumSwathHeightC;
+ double MinimumSwathHeightY;
+ double MinimumSwathHeightC;
+ double SwathWidth;
+ double SwathWidthGranularityY;
+ double SwathWidthGranularityC;
+ double RoundedUpMaxSwathSizeBytesY;
+ double RoundedUpMaxSwathSizeBytesC;
+ unsigned int j, k;
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ bool MainPlaneDoesODMCombine = false;
+
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ BytePerPixDETY = 8;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ BytePerPixDETY = 4;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ BytePerPixDETY = 2;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ BytePerPixDETY = 1;
+ BytePerPixDETC = 2;
+ } else {
+ BytePerPixDETY = 4.0 / 3.0;
+ BytePerPixDETC = 8.0 / 3.0;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ Read256BytesBlockHeightY = 4;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16) {
+ Read256BytesBlockHeightY = 8;
+ } else {
+ Read256BytesBlockHeightY = 16;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockHeightC = 0;
+ Read256BytesBlockWidthC = 0;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ Read256BytesBlockHeightY = 1;
+ Read256BytesBlockHeightC = 1;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ Read256BytesBlockHeightY = 16;
+ Read256BytesBlockHeightC = 8;
+ } else {
+ Read256BytesBlockHeightY = 8;
+ Read256BytesBlockHeightC = 8;
+ }
+ Read256BytesBlockWidthY = 256 / dml_ceil(BytePerPixDETY, 1)
+ / Read256BytesBlockHeightY;
+ Read256BytesBlockWidthC = 256 / dml_ceil(BytePerPixDETC, 2)
+ / Read256BytesBlockHeightC;
+ }
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ MaximumSwathHeightY = Read256BytesBlockHeightY;
+ MaximumSwathHeightC = Read256BytesBlockHeightC;
+ } else {
+ MaximumSwathHeightY = Read256BytesBlockWidthY;
+ MaximumSwathHeightC = Read256BytesBlockWidthC;
+ }
+
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_8
+ && mode_lib->vba.SourceScan[k] != dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ }
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightY = MaximumSwathHeightY / 2.0;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ MinimumSwathHeightC = MaximumSwathHeightC / 2.0;
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ } else {
+ MinimumSwathHeightY = MaximumSwathHeightY;
+ MinimumSwathHeightC = MaximumSwathHeightC;
+ }
+ }
+
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ SwathWidth = mode_lib->vba.ViewportWidth[k];
+ } else {
+ SwathWidth = mode_lib->vba.ViewportHeight[k];
+ }
+
+ if (mode_lib->vba.ODMCombineEnabled[k] == true) {
+ MainPlaneDoesODMCombine = true;
+ }
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.ODMCombineEnabled[j] == true) {
+ MainPlaneDoesODMCombine = true;
+ }
+ }
+
+ if (MainPlaneDoesODMCombine == true) {
+ SwathWidth = dml_min(
+ SwathWidth,
+ mode_lib->vba.HActive[k] / 2.0 * mode_lib->vba.HRatio[k]);
+ } else {
+ SwathWidth = SwathWidth / mode_lib->vba.DPPPerPlane[k];
+ }
+
+ SwathWidthGranularityY = 256 / dml_ceil(BytePerPixDETY, 1) / MaximumSwathHeightY;
+ RoundedUpMaxSwathSizeBytesY = (dml_ceil(
+ (double) (SwathWidth - 1),
+ SwathWidthGranularityY) + SwathWidthGranularityY) * BytePerPixDETY
+ * MaximumSwathHeightY;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesY = dml_ceil(RoundedUpMaxSwathSizeBytesY, 256)
+ + 256;
+ }
+ if (MaximumSwathHeightC > 0) {
+ SwathWidthGranularityC = 256.0 / dml_ceil(BytePerPixDETC, 2)
+ / MaximumSwathHeightC;
+ RoundedUpMaxSwathSizeBytesC = (dml_ceil(
+ (double) (SwathWidth / 2.0 - 1),
+ SwathWidthGranularityC) + SwathWidthGranularityC)
+ * BytePerPixDETC * MaximumSwathHeightC;
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ RoundedUpMaxSwathSizeBytesC = dml_ceil(
+ RoundedUpMaxSwathSizeBytesC,
+ 256) + 256;
+ }
+ } else
+ RoundedUpMaxSwathSizeBytesC = 0.0;
+
+ if (RoundedUpMaxSwathSizeBytesY + RoundedUpMaxSwathSizeBytesC
+ <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
+ mode_lib->vba.SwathHeightY[k] = MaximumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MaximumSwathHeightC;
+ } else {
+ mode_lib->vba.SwathHeightY[k] = MinimumSwathHeightY;
+ mode_lib->vba.SwathHeightC[k] = MinimumSwathHeightC;
+ }
+
+ if (mode_lib->vba.SwathHeightC[k] == 0) {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte * 1024;
+ mode_lib->vba.DETBufferSizeC[k] = 0;
+ } else if (mode_lib->vba.SwathHeightY[k] <= mode_lib->vba.SwathHeightC[k]) {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2;
+ mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2;
+ } else {
+ mode_lib->vba.DETBufferSizeY[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 * 2 / 3;
+ mode_lib->vba.DETBufferSizeC[k] = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 3;
+ }
+ }
+}
+
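+ // 256-byte request block height and width for the luma and chroma surfaces, as a function of pixel format
+ // and tiling.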
+bool Calculate256BBlockSizes(
+ enum source_format_class SourcePixelFormat,
+ enum dm_swizzle_mode SurfaceTiling,
+ unsigned int BytePerPixelY,
+ unsigned int BytePerPixelC,
+ unsigned int *BlockHeight256BytesY,
+ unsigned int *BlockHeight256BytesC,
+ unsigned int *BlockWidth256BytesY,
+ unsigned int *BlockWidth256BytesC)
+{
+ if ((SourcePixelFormat == dm_444_64 || SourcePixelFormat == dm_444_32
+ || SourcePixelFormat == dm_444_16
+ || SourcePixelFormat == dm_444_8)) {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ } else if (SourcePixelFormat == dm_444_64) {
+ *BlockHeight256BytesY = 4;
+ } else if (SourcePixelFormat == dm_444_8) {
+ *BlockHeight256BytesY = 16;
+ } else {
+ *BlockHeight256BytesY = 8;
+ }
+ *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
+ *BlockHeight256BytesC = 0;
+ *BlockWidth256BytesC = 0;
+ } else {
+ if (SurfaceTiling == dm_sw_linear) {
+ *BlockHeight256BytesY = 1;
+ *BlockHeight256BytesC = 1;
+ } else if (SourcePixelFormat == dm_420_8) {
+ *BlockHeight256BytesY = 16;
+ *BlockHeight256BytesC = 8;
+ } else {
+ *BlockHeight256BytesY = 8;
+ *BlockHeight256BytesC = 8;
+ }
+ *BlockWidth256BytesY = 256 / BytePerPixelY / *BlockHeight256BytesY;
+ *BlockWidth256BytesC = 256 / BytePerPixelC / *BlockHeight256BytesC;
+ }
+ return true;
+}
+
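+ // TWait covers the latencies the chosen prefetch mode must hide: DRAM clock change and self refresh for
+ // mode 0, self refresh for mode 1, urgent latency only otherwise.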
+static double CalculateTWait(
+ unsigned int PrefetchMode,
+ double DRAMClockChangeLatency,
+ double UrgentLatency,
+ double SREnterPlusExitTime)
+{
+ if (PrefetchMode == 0) {
+ return dml_max(
+ DRAMClockChangeLatency + UrgentLatency,
+ dml_max(SREnterPlusExitTime, UrgentLatency));
+ } else if (PrefetchMode == 1) {
+ return dml_max(SREnterPlusExitTime, UrgentLatency);
+ } else {
+ return UrgentLatency;
+ }
+}
+
+static double CalculateRemoteSurfaceFlipDelay(
+ struct display_mode_lib *mode_lib,
+ double VRatio,
+ double SwathWidth,
+ double Bpp,
+ double LineTime,
+ double XFCTSlvVupdateOffset,
+ double XFCTSlvVupdateWidth,
+ double XFCTSlvVreadyOffset,
+ double XFCXBUFLatencyTolerance,
+ double XFCFillBWOverhead,
+ double XFCSlvChunkSize,
+ double XFCBusTransportTime,
+ double TCalc,
+ double TWait,
+ double *SrcActiveDrainRate,
+ double *TInitXFill,
+ double *TslvChk)
+{
+ double TSlvSetup, AvgfillRate, result;
+
+ *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
+ TSlvSetup = XFCTSlvVupdateOffset + XFCTSlvVupdateWidth + XFCTSlvVreadyOffset;
+ *TInitXFill = XFCXBUFLatencyTolerance / (1 + XFCFillBWOverhead / 100);
+ AvgfillRate = *SrcActiveDrainRate * (1 + XFCFillBWOverhead / 100);
+ *TslvChk = XFCSlvChunkSize / AvgfillRate;
+ dml_print(
+ "DML::CalculateRemoteSurfaceFlipDelay: SrcActiveDrainRate: %f\n",
+ *SrcActiveDrainRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TSlvSetup: %f\n", TSlvSetup);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TInitXFill: %f\n", *TInitXFill);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: AvgfillRate: %f\n", AvgfillRate);
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: TslvChk: %f\n", *TslvChk);
+ result = 2 * XFCBusTransportTime + TSlvSetup + TCalc + TWait + *TslvChk + *TInitXFill; // TODO: This doesn't seem to match programming guide
+ dml_print("DML::CalculateRemoteSurfaceFlipDelay: RemoteSurfaceFlipDelay: %f\n", result);
+ return result;
+}
+
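+ // Minimum DISPCLK the writeback scaler needs for the given ratios and tap counts; formats other than
+ // dm_444_32 also check the chroma path.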
+static double CalculateWriteBackDISPCLK(
+ enum source_format_class WritebackPixelFormat,
+ double PixelClock,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ double WritebackDestinationWidth,
+ unsigned int HTotal,
+ unsigned int WritebackChromaLineBufferWidth)
+{
+ double CalculateWriteBackDISPCLK =
+ 1.01 * PixelClock
+ * dml_max(
+ dml_ceil(WritebackLumaHTaps / 4.0, 1)
+ / WritebackHRatio,
+ dml_max(
+ (WritebackLumaVTaps
+ * dml_ceil(
+ 1.0
+ / WritebackVRatio,
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1)
+ + dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1))
+ / (double) HTotal
+ + dml_ceil(
+ 1.0
+ / WritebackVRatio,
+ 1)
+ * (dml_ceil(
+ WritebackLumaVTaps
+ / 4.0,
+ 1)
+ + 4.0)
+ / (double) HTotal,
+ dml_ceil(
+ 1.0
+ / WritebackVRatio,
+ 1)
+ * WritebackDestinationWidth
+ / (double) HTotal));
+ if (WritebackPixelFormat != dm_444_32) {
+ CalculateWriteBackDISPCLK =
+ dml_max(
+ CalculateWriteBackDISPCLK,
+ 1.01 * PixelClock
+ * dml_max(
+ dml_ceil(
+ WritebackChromaHTaps
+ / 2.0,
+ 1)
+ / (2
+ * WritebackHRatio),
+ dml_max(
+ (WritebackChromaVTaps
+ * dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / 2.0,
+ 1)
+ + dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / WritebackChromaLineBufferWidth,
+ 1))
+ / HTotal
+ + dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * (dml_ceil(
+ WritebackChromaVTaps
+ / 4.0,
+ 1)
+ + 4)
+ / HTotal,
+ dml_ceil(
+ 1.0
+ / (2
+ * WritebackVRatio),
+ 1)
+ * WritebackDestinationWidth
+ / 2.0
+ / HTotal)));
+ }
+ return CalculateWriteBackDISPCLK;
+}
+
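+ // Writeback scaler delay in clock cycles (callers divide by DISPCLK); formats other than dm_444_32 also take
+ // the chroma path into account.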
+static double CalculateWriteBackDelay(
+ enum source_format_class WritebackPixelFormat,
+ double WritebackHRatio,
+ double WritebackVRatio,
+ unsigned int WritebackLumaHTaps,
+ unsigned int WritebackLumaVTaps,
+ unsigned int WritebackChromaHTaps,
+ unsigned int WritebackChromaVTaps,
+ unsigned int WritebackDestinationWidth)
+{
+ double CalculateWriteBackDelay =
+ dml_max(
+ dml_ceil(WritebackLumaHTaps / 4.0, 1) / WritebackHRatio,
+ WritebackLumaVTaps * dml_ceil(1.0 / WritebackVRatio, 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 4.0,
+ 1)
+ + dml_ceil(1.0 / WritebackVRatio, 1)
+ * (dml_ceil(
+ WritebackLumaVTaps
+ / 4.0,
+ 1) + 4));
+
+ if (WritebackPixelFormat != dm_444_32) {
+ CalculateWriteBackDelay =
+ dml_max(
+ CalculateWriteBackDelay,
+ dml_max(
+ dml_ceil(
+ WritebackChromaHTaps
+ / 2.0,
+ 1)
+ / (2
+ * WritebackHRatio),
+ WritebackChromaVTaps
+ * dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * dml_ceil(
+ WritebackDestinationWidth
+ / 2.0
+ / 2.0,
+ 1)
+ + dml_ceil(
+ 1
+ / (2
+ * WritebackVRatio),
+ 1)
+ * (dml_ceil(
+ WritebackChromaVTaps
+ / 4.0,
+ 1)
+ + 4)));
+ }
+ return CalculateWriteBackDelay;
+}
+
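+ /*
+ * Bandwidth consumed by per-row fetches while the plane is active: DCC meta
+ * row bytes and PTE row bytes divided by the time a row stays resident
+ * (row height * line time / VRatio).  4:2:0 formats add the chroma rows at
+ * half VRatio; the combined qual_row_bw is only reported for 4:2:0.
+ */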
+static void CalculateActiveRowBandwidth(
+ bool VirtualMemoryEnable,
+ enum source_format_class SourcePixelFormat,
+ double VRatio,
+ bool DCCEnable,
+ double LineTime,
+ unsigned int MetaRowByteLuma,
+ unsigned int MetaRowByteChroma,
+ unsigned int meta_row_height_luma,
+ unsigned int meta_row_height_chroma,
+ unsigned int PixelPTEBytesPerRowLuma,
+ unsigned int PixelPTEBytesPerRowChroma,
+ unsigned int dpte_row_height_luma,
+ unsigned int dpte_row_height_chroma,
+ double *meta_row_bw,
+ double *dpte_row_bw,
+ double *qual_row_bw)
+{
+ if (DCCEnable != true) {
+ *meta_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime)
+ + VRatio / 2 * MetaRowByteChroma
+ / (meta_row_height_chroma * LineTime);
+ } else {
+ *meta_row_bw = VRatio * MetaRowByteLuma / (meta_row_height_luma * LineTime);
+ }
+
+ if (VirtualMemoryEnable != true) {
+ *dpte_row_bw = 0;
+ } else if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime)
+ + VRatio / 2 * PixelPTEBytesPerRowChroma
+ / (dpte_row_height_chroma * LineTime);
+ } else {
+ *dpte_row_bw = VRatio * PixelPTEBytesPerRowLuma / (dpte_row_height_luma * LineTime);
+ }
+
+ if ((SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10)) {
+ *qual_row_bw = *meta_row_bw + *dpte_row_bw;
+ } else {
+ *qual_row_bw = 0;
+ }
+}
+
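+ /*
+ * Immediate-flip schedule for one pipe: split the bandwidth reserved for
+ * immediate flips in proportion to this pipe's flip bytes, derive how many
+ * destination lines the VM (PDE/meta PTE) and row (meta + PTE) fetches need,
+ * and report the flip as unsupported when those exceed the 8/16 line limits
+ * or cannot complete within the minimum row residency time.  4:2:0 planes
+ * take the qual_row_bw path and are always reported as supported here.
+ */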
+static void CalculateFlipSchedule(
+ struct display_mode_lib *mode_lib,
+ double UrgentExtraLatency,
+ double UrgentLatency,
+ unsigned int MaxPageTableLevels,
+ bool VirtualMemoryEnable,
+ double BandwidthAvailableForImmediateFlip,
+ unsigned int TotImmediateFlipBytes,
+ enum source_format_class SourcePixelFormat,
+ unsigned int ImmediateFlipBytes,
+ double LineTime,
+ double Tno_bw,
+ double VRatio,
+ double PDEAndMetaPTEBytesFrame,
+ unsigned int MetaRowByte,
+ unsigned int PixelPTEBytesPerRow,
+ bool DCCEnable,
+ unsigned int dpte_row_height,
+ unsigned int meta_row_height,
+ double qual_row_bw,
+ double *DestinationLinesToRequestVMInImmediateFlip,
+ double *DestinationLinesToRequestRowInImmediateFlip,
+ double *final_flip_bw,
+ bool *ImmediateFlipSupportedForPipe)
+{
+ double min_row_time = 0.0;
+
+ if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
+ *DestinationLinesToRequestVMInImmediateFlip = 0.0;
+ *DestinationLinesToRequestRowInImmediateFlip = 0.0;
+ *final_flip_bw = qual_row_bw;
+ *ImmediateFlipSupportedForPipe = true;
+ } else {
+ double TimeForFetchingMetaPTEImmediateFlip;
+ double TimeForFetchingRowInVBlankImmediateFlip;
+
+ if (VirtualMemoryEnable == true) {
+ mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
+ * ImmediateFlipBytes / TotImmediateFlipBytes;
+ TimeForFetchingMetaPTEImmediateFlip =
+ dml_max(
+ Tno_bw
+ + PDEAndMetaPTEBytesFrame
+ / mode_lib->vba.ImmediateFlipBW,
+ dml_max(
+ UrgentExtraLatency
+ + UrgentLatency
+ * (MaxPageTableLevels
+ - 1),
+ LineTime / 4.0));
+ } else {
+ TimeForFetchingMetaPTEImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestVMInImmediateFlip = dml_floor(
+ 4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime + 0.125),
+ 1) / 4.0;
+
+ if ((VirtualMemoryEnable == true || DCCEnable == true)) {
+ mode_lib->vba.ImmediateFlipBW = BandwidthAvailableForImmediateFlip
+ * ImmediateFlipBytes / TotImmediateFlipBytes;
+ TimeForFetchingRowInVBlankImmediateFlip = dml_max(
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / mode_lib->vba.ImmediateFlipBW,
+ dml_max(UrgentLatency, LineTime / 4.0));
+ } else {
+ TimeForFetchingRowInVBlankImmediateFlip = 0;
+ }
+
+ *DestinationLinesToRequestRowInImmediateFlip = dml_floor(
+ 4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime + 0.125),
+ 1) / 4.0;
+
+ if (VirtualMemoryEnable == true) {
+ *final_flip_bw =
+ dml_max(
+ PDEAndMetaPTEBytesFrame
+ / (*DestinationLinesToRequestVMInImmediateFlip
+ * LineTime),
+ (MetaRowByte + PixelPTEBytesPerRow)
+ / (TimeForFetchingRowInVBlankImmediateFlip
+ * LineTime));
+ } else if (MetaRowByte + PixelPTEBytesPerRow > 0) {
+ *final_flip_bw = (MetaRowByte + PixelPTEBytesPerRow)
+ / (TimeForFetchingRowInVBlankImmediateFlip * LineTime);
+ } else {
+ *final_flip_bw = 0;
+ }
+
+ if (VirtualMemoryEnable && !DCCEnable)
+ min_row_time = dpte_row_height * LineTime / VRatio;
+ else if (!VirtualMemoryEnable && DCCEnable)
+ min_row_time = meta_row_height * LineTime / VRatio;
+ else
+ min_row_time = dml_min(dpte_row_height, meta_row_height) * LineTime
+ / VRatio;
+
+ if (*DestinationLinesToRequestVMInImmediateFlip >= 8
+ || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ || TimeForFetchingMetaPTEImmediateFlip
+ + 2 * TimeForFetchingRowInVBlankImmediateFlip
+ > min_row_time)
+ *ImmediateFlipSupportedForPipe = false;
+ else
+ *ImmediateFlipSupportedForPipe = true;
+ }
+}
+
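+ /*
+ * Interlaced timings driven through the progressive-to-interlace unit in the
+ * OPP run the front end at twice the back-end rate: keep the original rate in
+ * PixelClockBackEnd and double PixelClock for those planes.
+ */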
+static void PixelClockAdjustmentForProgressiveToInterlaceUnit(struct display_mode_lib *mode_lib)
+{
+ unsigned int k;
+
+ //Progressive To Interlace Unit Effect
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ mode_lib->vba.PixelClockBackEnd[k] = mode_lib->vba.PixelClock[k];
+ if (mode_lib->vba.Interlace[k] == 1
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP == true) {
+ mode_lib->vba.PixelClock[k] = 2 * mode_lib->vba.PixelClock[k];
+ }
+ }
+}
+
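+ /* Map the cursor_bpp enum to its width in bits; unknown values map to 0. */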
+static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp)
+{
+ switch (ebpp) {
+ case dm_cur_2bit:
+ return 2;
+ case dm_cur_32bit:
+ return 32;
+ case dm_cur_64bit:
+ return 64;
+ default:
+ return 0;
+ }
+}
+
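+ /*
+ * Truncate a link bpp budget down to the nearest value the output can carry:
+ * fixed per-format steps for HDMI and for DP/eDP without DSC, or 1/16 bpp
+ * granularity between the per-format minimum and maximum when DSC is enabled.
+ * Returns 0 when even the lowest valid setting does not fit; e.g. 27.3 bpp on
+ * HDMI 4:4:4 truncates to 24.
+ */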
+static unsigned int TruncToValidBPP(
+ double DecimalBPP,
+ bool DSCEnabled,
+ enum output_encoder_class Output,
+ enum output_format_class Format,
+ unsigned int DSCInputBitPerComponent)
+{
+ if (Output == dm_hdmi) {
+ if (Format == dm_420) {
+ if (DecimalBPP >= 18)
+ return 18;
+ else if (DecimalBPP >= 15)
+ return 15;
+ else if (DecimalBPP >= 12)
+ return 12;
+ else
+ return 0;
+ } else if (Format == dm_444) {
+ if (DecimalBPP >= 36)
+ return 36;
+ else if (DecimalBPP >= 30)
+ return 30;
+ else if (DecimalBPP >= 24)
+ return 24;
+ else
+ return 0;
+ } else {
+ if (DecimalBPP / 1.5 >= 24)
+ return 24;
+ else if (DecimalBPP / 1.5 >= 20)
+ return 20;
+ else if (DecimalBPP / 1.5 >= 16)
+ return 16;
+ else
+ return 0;
+ }
+ } else {
+ if (DSCEnabled) {
+ if (Format == dm_420) {
+ if (DecimalBPP < 6)
+ return 0;
+ else if (DecimalBPP >= 1.5 * DSCInputBitPerComponent - 1.0 / 16)
+ return 1.5 * DSCInputBitPerComponent - 1.0 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ } else if (Format == dm_n422) {
+ if (DecimalBPP < 7)
+ return 0;
+ else if (DecimalBPP >= 2 * DSCInputBitPerComponent - 1.0 / 16)
+ return 2 * DSCInputBitPerComponent - 1.0 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ } else {
+ if (DecimalBPP < 8)
+ return 0;
+ else if (DecimalBPP >= 3 * DSCInputBitPerComponent - 1.0 / 16)
+ return 3 * DSCInputBitPerComponent - 1.0 / 16;
+ else
+ return dml_floor(16 * DecimalBPP, 1) / 16;
+ }
+ } else if (Format == dm_420) {
+ if (DecimalBPP >= 18)
+ return 18;
+ else if (DecimalBPP >= 15)
+ return 15;
+ else if (DecimalBPP >= 12)
+ return 12;
+ else
+ return 0;
+ } else if (Format == dm_s422 || Format == dm_n422) {
+ if (DecimalBPP >= 24)
+ return 24;
+ else if (DecimalBPP >= 20)
+ return 20;
+ else if (DecimalBPP >= 16)
+ return 16;
+ else
+ return 0;
+ } else {
+ if (DecimalBPP >= 36)
+ return 36;
+ else if (DecimalBPP >= 30)
+ return 30;
+ else if (DecimalBPP >= 24)
+ return 24;
+ else
+ return 0;
+ }
+ }
+}
+
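+ /*
+ * Full mode-support and voltage-state sweep: run every support check (scale
+ * ratios and taps, source format, bandwidth, writeback, DISPCLK/DPPCLK,
+ * viewport, pipe/OTG counts, DSC, urgent latency, prefetch) per plane and per
+ * voltage state and record the per-state support flags in mode_lib->vba.
+ */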
+static void ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
+{
+ int i;
+ unsigned int j, k;
+ /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+
+ /*Scale Ratio, taps Support Check*/
+
+ mode_lib->vba.ScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ScalerEnabled[k] == false
+ && ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)
+ || mode_lib->vba.HRatio[k] != 1.0
+ || mode_lib->vba.htaps[k] != 1.0
+ || mode_lib->vba.VRatio[k] != 1.0
+ || mode_lib->vba.vtaps[k] != 1.0)) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ } else if (mode_lib->vba.vtaps[k] < 1.0 || mode_lib->vba.vtaps[k] > 8.0
+ || mode_lib->vba.htaps[k] < 1.0 || mode_lib->vba.htaps[k] > 8.0
+ || (mode_lib->vba.htaps[k] > 1.0
+ && (mode_lib->vba.htaps[k] % 2) == 1)
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.MaxHSCLRatio
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.MaxVSCLRatio
+ || mode_lib->vba.HRatio[k] > mode_lib->vba.htaps[k]
+ || mode_lib->vba.VRatio[k] > mode_lib->vba.vtaps[k]
+ || (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8
+ && (mode_lib->vba.HRatio[k] / 2.0
+ > mode_lib->vba.HTAPsChroma[k]
+ || mode_lib->vba.VRatio[k] / 2.0
+ > mode_lib->vba.VTAPsChroma[k]))) {
+ mode_lib->vba.ScaleRatioAndTapsSupport = false;
+ }
+ }
+ /*Source Format, Pixel Format and Scan Support Check*/
+
+ mode_lib->vba.SourceFormatPixelAndScanSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ && mode_lib->vba.SourceScan[k] != dm_horz)
+ || ((mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_var_d_x)
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_64)
+ || (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x
+ && (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10))
+ || (((mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_gfx7_2d_thin_gl
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_gfx7_2d_thin_lvp)
+ && !((mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_444_32)
+ && mode_lib->vba.SourceScan[k]
+ == dm_horz
+ && mode_lib->vba.SupportGFX7CompatibleTilingIn32bppAnd64bpp
+ == true
+ && mode_lib->vba.DCCEnable[k]
+ == false))
+ || (mode_lib->vba.DCCEnable[k] == true
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_linear
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_8
+ || mode_lib->vba.SourcePixelFormat[k]
+ == dm_420_10)))) {
+ mode_lib->vba.SourceFormatPixelAndScanSupport = false;
+ }
+ }
+ /*Bandwidth Support Check*/
+
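+ /*
+ * Per-plane read bandwidth: swath width * bytes per pixel * VRatio over the
+ * line time, with the small DCC metadata (1/256) and PTE fetch (1/64..1/512)
+ * overheads added on top, accumulated into
+ * TotalReadBandwidthConsumedGBytePerSecond.
+ */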
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportWidth[k];
+ } else {
+ mode_lib->vba.SwathWidthYSingleDPP[k] = mode_lib->vba.ViewportHeight[k];
+ }
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_444_64) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 8.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_32) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 4.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 2.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_mono_8) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 0.0;
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8) {
+ mode_lib->vba.BytePerPixelInDETY[k] = 1.0;
+ mode_lib->vba.BytePerPixelInDETC[k] = 2.0;
+ } else {
+ mode_lib->vba.BytePerPixelInDETY[k] = 4.0 / 3;
+ mode_lib->vba.BytePerPixelInDETC[k] = 8.0 / 3;
+ }
+ }
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.SwathWidthYSingleDPP[k]
+ * (dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
+ * mode_lib->vba.VRatio[k]
+ + dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
+ / 2.0 * mode_lib->vba.VRatio[k] / 2)
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k]);
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+ * (1 + 1.0 / 256);
+ }
+ if (mode_lib->vba.VirtualMemoryEnable == true
+ && mode_lib->vba.SourceScan[k] != dm_horz
+ && (mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_4kb_d_x)) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+ * (1 + 1.0 / 64);
+ } else if (mode_lib->vba.VirtualMemoryEnable == true
+ && mode_lib->vba.SourceScan[k] == dm_horz
+ && (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32)
+ && (mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_t
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_d_x
+ || mode_lib->vba.SurfaceTiling[k] == dm_sw_64kb_r_x)) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+ * (1 + 1.0 / 256);
+ } else if (mode_lib->vba.VirtualMemoryEnable == true) {
+ mode_lib->vba.ReadBandwidth[k] = mode_lib->vba.ReadBandwidth[k]
+ * (1 + 1.0 / 512);
+ }
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond =
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
+ + mode_lib->vba.ReadBandwidth[k] / 1000.0;
+ }
+ mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 4.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 3.0;
+ } else if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WriteBandwidth[k] = mode_lib->vba.WritebackDestinationWidth[k]
+ * mode_lib->vba.WritebackDestinationHeight[k]
+ / (mode_lib->vba.WritebackSourceHeight[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]) * 1.5;
+ } else {
+ mode_lib->vba.WriteBandwidth[k] = 0.0;
+ }
+ mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond =
+ mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond
+ + mode_lib->vba.WriteBandwidth[k] / 1000.0;
+ }
+ mode_lib->vba.TotalBandwidthConsumedGBytePerSecond =
+ mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond
+ + mode_lib->vba.TotalWriteBandwidthConsumedGBytePerSecond;
+ mode_lib->vba.DCCEnabledInAnyPlane = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.DCCEnabledInAnyPlane = true;
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[i] = dml_min(
+ mode_lib->vba.DRAMSpeedPerState[i] * mode_lib->vba.NumberOfChannels
+ * mode_lib->vba.DRAMChannelWidth,
+ mode_lib->vba.FabricClockPerState[i]
+ * mode_lib->vba.FabricDatapathToDCNDataReturn)
+ / 1000;
+ mode_lib->vba.ReturnBWToDCNPerState = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0)
+ * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
+ / 100;
+ mode_lib->vba.ReturnBWPerState[i] = mode_lib->vba.ReturnBWToDCNPerState;
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true
+ && mode_lib->vba.ReturnBWToDCNPerState
+ > mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.ReturnBWToDCNPerState * 4.0
+ * (1.0
+ - mode_lib->vba.UrgentLatency
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ - mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0)
+ + mode_lib->vba.UrgentLatency)));
+ }
+ mode_lib->vba.CriticalPoint =
+ 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0);
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
+ && mode_lib->vba.CriticalPoint < 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ dml_pow(
+ 4.0
+ * mode_lib->vba.ReturnBWToDCNPerState
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0),
+ 2));
+ }
+ mode_lib->vba.ReturnBWToDCNPerState = dml_min(
+ mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i],
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[i] * 1000.0);
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true
+ && mode_lib->vba.ReturnBWToDCNPerState
+ > mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.ReturnBWToDCNPerState * 4.0
+ * (1.0
+ - mode_lib->vba.UrgentLatency
+ / ((mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ - mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.ReturnBusWidth
+ / 4.0)
+ + mode_lib->vba.UrgentLatency)));
+ }
+ mode_lib->vba.CriticalPoint =
+ 2.0 * mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0);
+ if (mode_lib->vba.DCCEnabledInAnyPlane == true && mode_lib->vba.CriticalPoint > 1.0
+ && mode_lib->vba.CriticalPoint < 4.0) {
+ mode_lib->vba.ReturnBWPerState[i] =
+ dml_min(
+ mode_lib->vba.ReturnBWPerState[i],
+ dml_pow(
+ 4.0
+ * mode_lib->vba.ReturnBWToDCNPerState
+ * (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0
+ * mode_lib->vba.ReturnBusWidth
+ * mode_lib->vba.DCFCLKPerState[i]
+ * mode_lib->vba.UrgentLatency
+ / (mode_lib->vba.ReturnBWToDCNPerState
+ * mode_lib->vba.UrgentLatency
+ + (mode_lib->vba.ROBBufferSizeInKByte
+ - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0),
+ 2));
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ if ((mode_lib->vba.TotalReadBandwidthConsumedGBytePerSecond * 1000.0
+ <= mode_lib->vba.ReturnBWPerState[i])
+ && (mode_lib->vba.TotalBandwidthConsumedGBytePerSecond * 1000.0
+ <= mode_lib->vba.FabricAndDRAMBandwidthPerState[i]
+ * 1000.0
+ * mode_lib->vba.PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency
+ / 100.0)) {
+ mode_lib->vba.BandwidthSupport[i] = true;
+ } else {
+ mode_lib->vba.BandwidthSupport[i] = false;
+ }
+ }
+ /*Writeback Latency support check*/
+
+ mode_lib->vba.WritebackLatencySupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackPixelFormat[k] == dm_444_32) {
+ if (mode_lib->vba.WriteBandwidth[k]
+ > (mode_lib->vba.WritebackInterfaceLumaBufferSize
+ + mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ } else {
+ if (mode_lib->vba.WriteBandwidth[k]
+ > 1.5
+ * dml_min(
+ mode_lib->vba.WritebackInterfaceLumaBufferSize,
+ 2.0
+ * mode_lib->vba.WritebackInterfaceChromaBufferSize)
+ / mode_lib->vba.WritebackLatency) {
+ mode_lib->vba.WritebackLatencySupport = false;
+ }
+ }
+ }
+ }
+ /*Re-ordering Buffer Support Check*/
+
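+ /*
+ * The ROB can hide the urgent round trip only if draining its usable space
+ * (ROB size minus one pixel chunk) at the state's return bandwidth takes
+ * longer than the round-trip plus out-of-order return latency.
+ */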
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i] =
+ (mode_lib->vba.RoundTripPingLatencyCycles + 32.0)
+ / mode_lib->vba.DCFCLKPerState[i]
+ + mode_lib->vba.UrgentOutOfOrderReturnPerChannel
+ * mode_lib->vba.NumberOfChannels
+ / mode_lib->vba.ReturnBWPerState[i];
+ if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte)
+ * 1024.0 / mode_lib->vba.ReturnBWPerState[i]
+ > mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]) {
+ mode_lib->vba.ROBSupport[i] = true;
+ } else {
+ mode_lib->vba.ROBSupport[i] = false;
+ }
+ }
+ /*Writeback Mode Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveWriteback = 0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.TotalNumberOfActiveWriteback =
+ mode_lib->vba.TotalNumberOfActiveWriteback + 1;
+ }
+ }
+ mode_lib->vba.WritebackModeSupport = true;
+ if (mode_lib->vba.TotalNumberOfActiveWriteback > mode_lib->vba.MaxNumWriteback) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true
+ && mode_lib->vba.Writeback10bpc420Supported != true
+ && mode_lib->vba.WritebackPixelFormat[k] == dm_420_10) {
+ mode_lib->vba.WritebackModeSupport = false;
+ }
+ }
+ /*Writeback Scale Ratio and Taps Support Check*/
+
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ if (mode_lib->vba.WritebackLumaAndChromaScalingSupported == false
+ && (mode_lib->vba.WritebackHRatio[k] != 1.0
+ || mode_lib->vba.WritebackVRatio[k] != 1.0)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackHRatio[k] > mode_lib->vba.WritebackMaxHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackMaxVSCLRatio
+ || mode_lib->vba.WritebackHRatio[k]
+ < mode_lib->vba.WritebackMinHSCLRatio
+ || mode_lib->vba.WritebackVRatio[k]
+ < mode_lib->vba.WritebackMinVSCLRatio
+ || mode_lib->vba.WritebackLumaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackLumaHTaps[k]
+ || mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackLumaVTaps[k]
+ || (mode_lib->vba.WritebackLumaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackLumaHTaps[k] % 2)
+ == 1))
+ || (mode_lib->vba.WritebackPixelFormat[k] != dm_444_32
+ && (mode_lib->vba.WritebackChromaHTaps[k]
+ > mode_lib->vba.WritebackMaxHSCLTaps
+ || mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackMaxVSCLTaps
+ || 2.0
+ * mode_lib->vba.WritebackHRatio[k]
+ > mode_lib->vba.WritebackChromaHTaps[k]
+ || 2.0
+ * mode_lib->vba.WritebackVRatio[k]
+ > mode_lib->vba.WritebackChromaVTaps[k]
+ || (mode_lib->vba.WritebackChromaHTaps[k] > 2.0
+ && ((mode_lib->vba.WritebackChromaHTaps[k] % 2) == 1))))) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (mode_lib->vba.WritebackVRatio[k] < 1.0) {
+ mode_lib->vba.WritebackLumaVExtra =
+ dml_max(1.0 - 2.0 / dml_ceil(1.0 / mode_lib->vba.WritebackVRatio[k], 1.0), 0.0);
+ } else {
+ mode_lib->vba.WritebackLumaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_444_32
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > (mode_lib->vba.WritebackLineBufferLumaBufferSize
+ + mode_lib->vba.WritebackLineBufferChromaBufferSize)
+ / 3.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackLumaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferLumaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackLumaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ if (2.0 * mode_lib->vba.WritebackVRatio[k] < 1) {
+ mode_lib->vba.WritebackChromaVExtra = 0.0;
+ } else {
+ mode_lib->vba.WritebackChromaVExtra = -1;
+ }
+ if ((mode_lib->vba.WritebackPixelFormat[k] == dm_420_8
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)
+ || (mode_lib->vba.WritebackPixelFormat[k] == dm_420_10
+ && mode_lib->vba.WritebackChromaVTaps[k]
+ > mode_lib->vba.WritebackLineBufferChromaBufferSize
+ * 8.0 / 10.0
+ / mode_lib->vba.WritebackDestinationWidth[k]
+ - mode_lib->vba.WritebackChromaVExtra)) {
+ mode_lib->vba.WritebackScaleRatioAndTapsSupport = false;
+ }
+ }
+ }
+ /*Maximum DISPCLK/DPPCLK Support check*/
+
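+ /*
+ * Required DISPCLK/DPPCLK per state: start from the single-DPP DPPCLK each
+ * plane needs (limited by PSCL throughput and tap counts), fall back to ODM
+ * combine or a second DPP when a plane exceeds the rounded-down clock limits,
+ * and record DISPCLK_DPPCLK_Support[] together with the resulting DPP counts.
+ */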
+ mode_lib->vba.WritebackRequiredDISPCLK = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackRequiredDISPCLK =
+ dml_max(
+ mode_lib->vba.WritebackRequiredDISPCLK,
+ CalculateWriteBackDISPCLK(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.WritebackChromaLineBufferWidth));
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.HRatio[k] > 1.0) {
+ mode_lib->vba.PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / dml_ceil(
+ mode_lib->vba.htaps[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_FACTOR[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.PSCL_FACTOR_CHROMA[k] = 0.0;
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.PixelClock[k]
+ * dml_max3(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_FACTOR[k],
+ 1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0)
+ && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ } else {
+ if (mode_lib->vba.HRatio[k] / 2.0 > 1.0) {
+ mode_lib->vba.PSCL_FACTOR_CHROMA[k] =
+ dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput
+ * mode_lib->vba.HRatio[k]
+ / 2.0
+ / dml_ceil(
+ mode_lib->vba.HTAPsChroma[k]
+ / 6.0,
+ 1.0));
+ } else {
+ mode_lib->vba.PSCL_FACTOR_CHROMA[k] = dml_min(
+ mode_lib->vba.MaxDCHUBToPSCLThroughput,
+ mode_lib->vba.MaxPSCLToLBThroughput);
+ }
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] =
+ mode_lib->vba.PixelClock[k]
+ * dml_max5(
+ mode_lib->vba.vtaps[k] / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / mode_lib->vba.PSCL_FACTOR[k],
+ mode_lib->vba.VTAPsChroma[k]
+ / 6.0
+ * dml_min(
+ 1.0,
+ mode_lib->vba.HRatio[k]
+ / 2.0),
+ mode_lib->vba.HRatio[k]
+ * mode_lib->vba.VRatio[k]
+ / 4.0
+ / mode_lib->vba.PSCL_FACTOR_CHROMA[k],
+ 1.0);
+ if ((mode_lib->vba.htaps[k] > 6.0 || mode_lib->vba.vtaps[k] > 6.0
+ || mode_lib->vba.HTAPsChroma[k] > 6.0
+ || mode_lib->vba.VTAPsChroma[k] > 6.0)
+ && mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ < 2.0 * mode_lib->vba.PixelClock[k]) {
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k] = 2.0
+ * mode_lib->vba.PixelClock[k];
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ Calculate256BBlockSizes(
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
+ dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
+ &mode_lib->vba.Read256BlockHeightY[k],
+ &mode_lib->vba.Read256BlockHeightC[k],
+ &mode_lib->vba.Read256BlockWidthY[k],
+ &mode_lib->vba.Read256BlockWidthC[k]);
+ if (mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockHeightY[k];
+ mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockHeightC[k];
+ } else {
+ mode_lib->vba.MaxSwathHeightY[k] = mode_lib->vba.Read256BlockWidthY[k];
+ mode_lib->vba.MaxSwathHeightC[k] = mode_lib->vba.Read256BlockWidthC[k];
+ }
+ if ((mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_32
+ || mode_lib->vba.SourcePixelFormat[k] == dm_444_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_16
+ || mode_lib->vba.SourcePixelFormat[k] == dm_mono_8)) {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear
+ || (mode_lib->vba.SourcePixelFormat[k] == dm_444_64
+ && (mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_4kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_t
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_64kb_s_x
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s
+ || mode_lib->vba.SurfaceTiling[k]
+ == dm_sw_var_s_x)
+ && mode_lib->vba.SourceScan[k] == dm_horz)) {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ } else {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
+ / 2.0;
+ }
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ } else {
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_8
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k]
+ / 2.0;
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ } else if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10
+ && mode_lib->vba.SourceScan[k] == dm_horz) {
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k]
+ / 2.0;
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ } else {
+ mode_lib->vba.MinSwathHeightY[k] = mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.MinSwathHeightC[k] = mode_lib->vba.MaxSwathHeightC[k];
+ }
+ }
+ if (mode_lib->vba.SurfaceTiling[k] == dm_sw_linear) {
+ mode_lib->vba.MaximumSwathWidthSupport = 8192.0;
+ } else {
+ mode_lib->vba.MaximumSwathWidthSupport = 5120.0;
+ }
+ mode_lib->vba.MaximumSwathWidthInDETBuffer =
+ dml_min(
+ mode_lib->vba.MaximumSwathWidthSupport,
+ mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0
+ / (mode_lib->vba.BytePerPixelInDETY[k]
+ * mode_lib->vba.MinSwathHeightY[k]
+ + mode_lib->vba.BytePerPixelInDETC[k]
+ / 2.0
+ * mode_lib->vba.MinSwathHeightC[k]));
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.MaximumSwathWidthInLineBuffer =
+ mode_lib->vba.LineBufferSize
+ * dml_max(mode_lib->vba.HRatio[k], 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.vtaps[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k],
+ 1.0)
+ - 2,
+ 0.0));
+ } else {
+ mode_lib->vba.MaximumSwathWidthInLineBuffer =
+ dml_min(
+ mode_lib->vba.LineBufferSize
+ * dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.vtaps[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k],
+ 1.0)
+ - 2,
+ 0.0)),
+ 2.0 * mode_lib->vba.LineBufferSize
+ * dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2.0,
+ 1.0)
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.VTAPsChroma[k]
+ + dml_max(
+ dml_ceil(
+ mode_lib->vba.VRatio[k]
+ / 2.0,
+ 1.0)
+ - 2,
+ 0.0)));
+ }
+ mode_lib->vba.MaximumSwathWidth[k] = dml_min(
+ mode_lib->vba.MaximumSwathWidthInDETBuffer,
+ mode_lib->vba.MaximumSwathWidthInLineBuffer);
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDispclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(
+ mode_lib->vba.MaxDppclk[i],
+ mode_lib->vba.DISPCLKDPPCLKVCOSpeed);
+ mode_lib->vba.RequiredDISPCLK[i] = 0.0;
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * (1.0
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100.0);
+ if (mode_lib->vba.ODMCapability == true
+ && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ / 2.0;
+ } else {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ }
+ if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ && mode_lib->vba.SwathWidthYSingleDPP[k]
+ <= mode_lib->vba.MaximumSwathWidth[k]
+ && mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
+ mode_lib->vba.NoOfDPP[i][k] = 1;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ } else {
+ mode_lib->vba.NoOfDPP[i][k] = 2;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ / 2.0;
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k] / mode_lib->vba.NoOfDPP[i][k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotalNumberOfActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ if ((mode_lib->vba.MaxDispclk[i] == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
+ && mode_lib->vba.MaxDppclk[i]
+ == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])
+ && (mode_lib->vba.TotalNumberOfActiveDPP[i]
+ > mode_lib->vba.MaxNumDPP
+ || mode_lib->vba.DISPCLK_DPPCLK_Support[i] == false)) {
+ mode_lib->vba.RequiredDISPCLK[i] = 0.0;
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ if (mode_lib->vba.ODMCapability == true
+ && mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = true;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine
+ / 2.0;
+ } else {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
+ }
+ if (mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity
+ && mode_lib->vba.SwathWidthYSingleDPP[k]
+ <= mode_lib->vba.MaximumSwathWidth[k]
+ && mode_lib->vba.ODMCombineEnablePerState[i][k]
+ == false) {
+ mode_lib->vba.NoOfDPP[i][k] = 1;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ } else {
+ mode_lib->vba.NoOfDPP[i][k] = 2;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ / 2.0;
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.NoOfDPP[i][k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotalNumberOfActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ }
+ if (mode_lib->vba.TotalNumberOfActiveDPP[i] > mode_lib->vba.MaxNumDPP) {
+ mode_lib->vba.RequiredDISPCLK[i] = 0.0;
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ODMCombineEnablePerState[i][k] = false;
+ if (mode_lib->vba.SwathWidthYSingleDPP[k]
+ <= mode_lib->vba.MaximumSwathWidth[k]) {
+ mode_lib->vba.NoOfDPP[i][k] = 1;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ } else {
+ mode_lib->vba.NoOfDPP[i][k] = 2;
+ mode_lib->vba.RequiredDPPCLK[i][k] =
+ mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ / 2.0;
+ }
+ if (!(mode_lib->vba.MaxDispclk[i]
+ == mode_lib->vba.MaxDispclk[DC__VOLTAGE_STATES]
+ && mode_lib->vba.MaxDppclk[i]
+ == mode_lib->vba.MaxDppclk[DC__VOLTAGE_STATES])) {
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * (1.0
+ + mode_lib->vba.DISPCLKRampingMargin
+ / 100.0);
+ } else {
+ mode_lib->vba.PlaneRequiredDISPCLK =
+ mode_lib->vba.PixelClock[k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0);
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PlaneRequiredDISPCLK);
+ if ((mode_lib->vba.MinDPPCLKUsingSingleDPP[k]
+ / mode_lib->vba.NoOfDPP[i][k]
+ * (1.0
+ + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ > mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity)
+ || (mode_lib->vba.PlaneRequiredDISPCLK
+ > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity)) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ mode_lib->vba.TotalNumberOfActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.TotalNumberOfActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ }
+ mode_lib->vba.RequiredDISPCLK[i] = dml_max(
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.WritebackRequiredDISPCLK);
+ if (mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity
+ < mode_lib->vba.WritebackRequiredDISPCLK) {
+ mode_lib->vba.DISPCLK_DPPCLK_Support[i] = false;
+ }
+ }
+ /*Viewport Size Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.ViewportSizeSupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
+ if (dml_min(
+ mode_lib->vba.SwathWidthYSingleDPP[k],
+ dml_round(
+ mode_lib->vba.HActive[k] / 2.0
+ * mode_lib->vba.HRatio[k]))
+ > mode_lib->vba.MaximumSwathWidth[k]) {
+ mode_lib->vba.ViewportSizeSupport[i] = false;
+ }
+ } else {
+ if (mode_lib->vba.SwathWidthYSingleDPP[k] / 2.0
+ > mode_lib->vba.MaximumSwathWidth[k]) {
+ mode_lib->vba.ViewportSizeSupport[i] = false;
+ }
+ }
+ }
+ }
+ /*Total Available Pipes Support Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ if (mode_lib->vba.TotalNumberOfActiveDPP[i] <= mode_lib->vba.MaxNumDPP) {
+ mode_lib->vba.TotalAvailablePipesSupport[i] = true;
+ } else {
+ mode_lib->vba.TotalAvailablePipesSupport[i] = false;
+ }
+ }
+ /*Total Available OTG Support Check*/
+
+ mode_lib->vba.TotalNumberOfActiveOTG = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.TotalNumberOfActiveOTG = mode_lib->vba.TotalNumberOfActiveOTG
+ + 1.0;
+ }
+ }
+ if (mode_lib->vba.TotalNumberOfActiveOTG <= mode_lib->vba.MaxNumOTG) {
+ mode_lib->vba.NumberOfOTGSupport = true;
+ } else {
+ mode_lib->vba.NumberOfOTGSupport = false;
+ }
+ /*Display IO and DSC Support Check*/
+
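+ /*
+ * Link and DSC feasibility per state: pick the output bpp each encoder can
+ * carry (HDMI straight from PHYCLK; DP/eDP tried at the 270/540/810 MHz
+ * HBR/HBR2/HBR3 link rates, with and without DSC and FEC overhead), then
+ * verify the DSCCLK requirement and the number of available DSC units.
+ */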
+ mode_lib->vba.NonsupportedDSCInputBPC = false;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (!(mode_lib->vba.DSCInputBitPerComponent[k] == 12.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 10.0
+ || mode_lib->vba.DSCInputBitPerComponent[k] == 8.0)) {
+ mode_lib->vba.NonsupportedDSCInputBPC = true;
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.RequiresDSC[i][k] = 0;
+ mode_lib->vba.RequiresFEC[i][k] = 0;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.Output[k] == dm_hdmi) {
+ mode_lib->vba.RequiresDSC[i][k] = 0;
+ mode_lib->vba.RequiresFEC[i][k] = 0;
+ mode_lib->vba.OutputBppPerState[i][k] =
+ TruncToValidBPP(
+ dml_min(
+ 600.0,
+ mode_lib->vba.PHYCLKPerState[i])
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 24,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ } else if (mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp) {
+ if (mode_lib->vba.Output[k] == dm_edp) {
+ mode_lib->vba.EffectiveFECOverhead = 0.0;
+ } else {
+ mode_lib->vba.EffectiveFECOverhead =
+ mode_lib->vba.FECOverhead;
+ }
+ if (mode_lib->vba.PHYCLKPerState[i] >= 270.0) {
+ mode_lib->vba.Outbpp =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * (1.0
+ - mode_lib->vba.EffectiveFECOverhead
+ / 100.0)
+ * 270.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ mode_lib->vba.RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ mode_lib->vba.RequiresFEC[i][k] =
+ true;
+ } else {
+ mode_lib->vba.RequiresFEC[i][k] =
+ false;
+ }
+ mode_lib->vba.Outbpp =
+ mode_lib->vba.OutbppDSC;
+ } else {
+ mode_lib->vba.RequiresDSC[i][k] = false;
+ mode_lib->vba.RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == 0) {
+ mode_lib->vba.Outbpp =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * (1.0
+ - mode_lib->vba.EffectiveFECOverhead
+ / 100.0)
+ * 540.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true) {
+ mode_lib->vba.RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ mode_lib->vba.RequiresFEC[i][k] =
+ true;
+ } else {
+ mode_lib->vba.RequiresFEC[i][k] =
+ false;
+ }
+ mode_lib->vba.Outbpp =
+ mode_lib->vba.OutbppDSC;
+ } else {
+ mode_lib->vba.RequiresDSC[i][k] = false;
+ mode_lib->vba.RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ if (mode_lib->vba.Outbpp == 0
+ && mode_lib->vba.PHYCLKPerState[i]
+ >= 810.0) {
+ mode_lib->vba.Outbpp =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ false,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ mode_lib->vba.OutbppDSC =
+ TruncToValidBPP(
+ (1.0
+ - mode_lib->vba.Downspreading
+ / 100.0)
+ * (1.0
+ - mode_lib->vba.EffectiveFECOverhead
+ / 100.0)
+ * 810.0
+ * mode_lib->vba.OutputLinkDPLanes[k]
+ / mode_lib->vba.PixelClockBackEnd[k]
+ * 8.0,
+ true,
+ mode_lib->vba.Output[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.DSCInputBitPerComponent[k]);
+ if (mode_lib->vba.DSCEnabled[k] == true
+ || mode_lib->vba.Outbpp == 0) {
+ mode_lib->vba.RequiresDSC[i][k] = true;
+ if (mode_lib->vba.Output[k] == dm_dp) {
+ mode_lib->vba.RequiresFEC[i][k] =
+ true;
+ } else {
+ mode_lib->vba.RequiresFEC[i][k] =
+ false;
+ }
+ mode_lib->vba.Outbpp =
+ mode_lib->vba.OutbppDSC;
+ } else {
+ mode_lib->vba.RequiresDSC[i][k] = false;
+ mode_lib->vba.RequiresFEC[i][k] = false;
+ }
+ mode_lib->vba.OutputBppPerState[i][k] =
+ mode_lib->vba.Outbpp;
+ }
+ }
+ } else {
+ mode_lib->vba.OutputBppPerState[i][k] = 0;
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.DIOSupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.OutputBppPerState[i][k] == 0
+ || (mode_lib->vba.OutputFormat[k] == dm_420
+ && mode_lib->vba.ProgressiveToInterlaceUnitInOPP
+ == true)) {
+ mode_lib->vba.DIOSupport[i] = false;
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if ((mode_lib->vba.Output[k] == dm_dp
+ || mode_lib->vba.Output[k] == dm_edp)) {
+ if (mode_lib->vba.OutputFormat[k] == dm_420
+ || mode_lib->vba.OutputFormat[k]
+ == dm_n422) {
+ mode_lib->vba.DSCFormatFactor = 2;
+ } else {
+ mode_lib->vba.DSCFormatFactor = 1;
+ }
+ if (mode_lib->vba.RequiresDSC[i][k] == true) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k]
+ == true) {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 6.0
+ / mode_lib->vba.DSCFormatFactor
+ > (1.0
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * mode_lib->vba.MaxDSCCLK[i]) {
+ mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ } else {
+ if (mode_lib->vba.PixelClockBackEnd[k] / 3.0
+ / mode_lib->vba.DSCFormatFactor
+ > (1.0
+ - mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading
+ / 100.0)
+ * mode_lib->vba.MaxDSCCLK[i]) {
+ mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] =
+ true;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.NotEnoughDSCUnits[i] = false;
+ mode_lib->vba.TotalDSCUnitsRequired = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.RequiresDSC[i][k] == true) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 2.0;
+ } else {
+ mode_lib->vba.TotalDSCUnitsRequired =
+ mode_lib->vba.TotalDSCUnitsRequired + 1.0;
+ }
+ }
+ }
+ if (mode_lib->vba.TotalDSCUnitsRequired > mode_lib->vba.NumberOfDSC) {
+ mode_lib->vba.NotEnoughDSCUnits[i] = true;
+ }
+ }
+ /*DSC Delay per state*/
+
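+ /*
+ * DSC delay per state: estimate the slice count from the back-end pixel
+ * clock, feed slice width and bpp into dscceComputeDelay()/dscComputeDelay()
+ * (doubled for ODM combine), rescale to the front-end pixel clock, and copy
+ * the result to every plane sharing the same blending/timing source.
+ */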
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] != k) {
+ mode_lib->vba.slices = 0;
+ } else if (mode_lib->vba.RequiresDSC[i][k] == false) {
+ mode_lib->vba.slices = 0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 3200.0) {
+ mode_lib->vba.slices = dml_ceil(
+ mode_lib->vba.PixelClockBackEnd[k] / 400.0,
+ 4.0);
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 1360.0) {
+ mode_lib->vba.slices = 8.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 680.0) {
+ mode_lib->vba.slices = 4.0;
+ } else if (mode_lib->vba.PixelClockBackEnd[k] > 340.0) {
+ mode_lib->vba.slices = 2.0;
+ } else {
+ mode_lib->vba.slices = 1.0;
+ }
+ if (mode_lib->vba.OutputBppPerState[i][k] == 0) {
+ mode_lib->vba.bpp = 0.0;
+ } else {
+ mode_lib->vba.bpp = mode_lib->vba.OutputBppPerState[i][k];
+ }
+ if (mode_lib->vba.RequiresDSC[i][k] == true && mode_lib->vba.bpp != 0.0) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == false) {
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(
+ mode_lib->vba.HActive[k]
+ / mode_lib->vba.slices,
+ 1.0),
+ mode_lib->vba.slices,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]);
+ } else {
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ 2.0
+ * (dscceComputeDelay(
+ mode_lib->vba.DSCInputBitPerComponent[k],
+ mode_lib->vba.bpp,
+ dml_ceil(
+ mode_lib->vba.HActive[k]
+ / mode_lib->vba.slices,
+ 1.0),
+ mode_lib->vba.slices
+ / 2,
+ mode_lib->vba.OutputFormat[k])
+ + dscComputeDelay(
+ mode_lib->vba.OutputFormat[k]));
+ }
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ mode_lib->vba.DSCDelayPerState[i][k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.PixelClockBackEnd[k];
+ } else {
+ mode_lib->vba.DSCDelayPerState[i][k] = 0.0;
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j
+ && mode_lib->vba.RequiresDSC[i][j] == true) {
+ mode_lib->vba.DSCDelayPerState[i][k] =
+ mode_lib->vba.DSCDelayPerState[i][j];
+ }
+ }
+ }
+ }
+ /*Urgent Latency Support Check*/
+
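+ /*
+ * Urgent latency support: for each state and plane, compute how much latency
+ * the DET and line buffer can hide (buffered lines * line time / VRatio,
+ * minus the time to refill the swath at the per-DPP return bandwidth) and
+ * require it to cover UrgentLatency for both luma and chroma.
+ */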
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ if (mode_lib->vba.ODMCombineEnablePerState[i][k] == true) {
+ mode_lib->vba.SwathWidthYPerState[i][k] =
+ dml_min(
+ mode_lib->vba.SwathWidthYSingleDPP[k],
+ dml_round(
+ mode_lib->vba.HActive[k]
+ / 2.0
+ * mode_lib->vba.HRatio[k]));
+ } else {
+ mode_lib->vba.SwathWidthYPerState[i][k] =
+ mode_lib->vba.SwathWidthYSingleDPP[k]
+ / mode_lib->vba.NoOfDPP[i][k];
+ }
+ mode_lib->vba.SwathWidthGranularityY = 256.0
+ / dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0)
+ / mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY = (dml_ceil(
+ mode_lib->vba.SwathWidthYPerState[i][k] - 1.0,
+ mode_lib->vba.SwathWidthGranularityY)
+ + mode_lib->vba.SwathWidthGranularityY)
+ * mode_lib->vba.BytePerPixelInDETY[k]
+ * mode_lib->vba.MaxSwathHeightY[k];
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY = dml_ceil(
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesY,
+ 256.0) + 256;
+ }
+ if (mode_lib->vba.MaxSwathHeightC[k] > 0.0) {
+ mode_lib->vba.SwathWidthGranularityC = 256.0
+ / dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0)
+ / mode_lib->vba.MaxSwathHeightC[k];
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = (dml_ceil(
+ mode_lib->vba.SwathWidthYPerState[i][k] / 2.0 - 1.0,
+ mode_lib->vba.SwathWidthGranularityC)
+ + mode_lib->vba.SwathWidthGranularityC)
+ * mode_lib->vba.BytePerPixelInDETC[k]
+ * mode_lib->vba.MaxSwathHeightC[k];
+ if (mode_lib->vba.SourcePixelFormat[k] == dm_420_10) {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = dml_ceil(
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC,
+ 256.0) + 256;
+ }
+ } else {
+ mode_lib->vba.RoundedUpMaxSwathSizeBytesC = 0.0;
+ }
+ if (mode_lib->vba.RoundedUpMaxSwathSizeBytesY
+ + mode_lib->vba.RoundedUpMaxSwathSizeBytesC
+ <= mode_lib->vba.DETBufferSizeInKByte * 1024.0 / 2.0) {
+ mode_lib->vba.SwathHeightYPerState[i][k] =
+ mode_lib->vba.MaxSwathHeightY[k];
+ mode_lib->vba.SwathHeightCPerState[i][k] =
+ mode_lib->vba.MaxSwathHeightC[k];
+ } else {
+ mode_lib->vba.SwathHeightYPerState[i][k] =
+ mode_lib->vba.MinSwathHeightY[k];
+ mode_lib->vba.SwathHeightCPerState[i][k] =
+ mode_lib->vba.MinSwathHeightC[k];
+ }
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / mode_lib->vba.BytePerPixelInDETY[k]
+ / mode_lib->vba.SwathWidthYPerState[i][k];
+ mode_lib->vba.LinesInDETChroma = 0.0;
+ } else if (mode_lib->vba.SwathHeightYPerState[i][k]
+ <= mode_lib->vba.SwathHeightCPerState[i][k]) {
+ mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETY[k]
+ / mode_lib->vba.SwathWidthYPerState[i][k];
+ mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 2.0 / mode_lib->vba.BytePerPixelInDETC[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
+ } else {
+ mode_lib->vba.LinesInDETLuma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 * 2.0 / 3.0
+ / mode_lib->vba.BytePerPixelInDETY[k]
+ / mode_lib->vba.SwathWidthYPerState[i][k];
+ mode_lib->vba.LinesInDETChroma = mode_lib->vba.DETBufferSizeInKByte
+ * 1024.0 / 3.0 / mode_lib->vba.BytePerPixelInDETY[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k] / 2.0);
+ }
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ dml_floor(
+ mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k]
+ / dml_max(
+ mode_lib->vba.HRatio[k],
+ 1.0)),
+ 1.0))
+ - (mode_lib->vba.vtaps[k] - 1.0);
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma =
+ dml_min(
+ mode_lib->vba.MaxLineBufferLines,
+ dml_floor(
+ mode_lib->vba.LineBufferSize
+ / mode_lib->vba.LBBitPerPixel[k]
+ / (mode_lib->vba.SwathWidthYPerState[i][k]
+ / 2.0
+ / dml_max(
+ mode_lib->vba.HRatio[k]
+ / 2.0,
+ 1.0)),
+ 1.0))
+ - (mode_lib->vba.VTAPsChroma[k] - 1.0);
+ mode_lib->vba.EffectiveDETLBLinesLuma =
+ dml_floor(
+ mode_lib->vba.LinesInDETLuma
+ + dml_min(
+ mode_lib->vba.LinesInDETLuma
+ * mode_lib->vba.RequiredDISPCLK[i]
+ * mode_lib->vba.BytePerPixelInDETY[k]
+ * mode_lib->vba.PSCL_FACTOR[k]
+ / mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesLuma),
+ mode_lib->vba.SwathHeightYPerState[i][k]);
+ mode_lib->vba.EffectiveDETLBLinesChroma =
+ dml_floor(
+ mode_lib->vba.LinesInDETChroma
+ + dml_min(
+ mode_lib->vba.LinesInDETChroma
+ * mode_lib->vba.RequiredDISPCLK[i]
+ * mode_lib->vba.BytePerPixelInDETC[k]
+ * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
+ / mode_lib->vba.ReturnBWPerState[i],
+ mode_lib->vba.EffectiveLBLatencyHidingSourceLinesChroma),
+ mode_lib->vba.SwathHeightCPerState[i][k]);
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
+ mode_lib->vba.EffectiveDETLBLinesLuma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETLBLinesLuma
+ * mode_lib->vba.SwathWidthYPerState[i][k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / (mode_lib->vba.ReturnBWPerState[i]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.UrgentLatencySupportUsPerState[i][k] =
+ dml_min(
+ mode_lib->vba.EffectiveDETLBLinesLuma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k]
+ - mode_lib->vba.EffectiveDETLBLinesLuma
+ * mode_lib->vba.SwathWidthYPerState[i][k]
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / (mode_lib->vba.ReturnBWPerState[i]
+ / mode_lib->vba.NoOfDPP[i][k]),
+ mode_lib->vba.EffectiveDETLBLinesChroma
+ * (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k])
+ / (mode_lib->vba.VRatio[k]
+ / 2.0)
+ - mode_lib->vba.EffectiveDETLBLinesChroma
+ * mode_lib->vba.SwathWidthYPerState[i][k]
+ / 2.0
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / (mode_lib->vba.ReturnBWPerState[i]
+ / mode_lib->vba.NoOfDPP[i][k]));
+ }
+ }
+ }
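+	/*
+	 * Urgent latency support: a voltage state passes only when every active
+	 * plane's computed support time is at least the configured UrgentLatency.
+	 */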
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.UrgentLatencySupport[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.UrgentLatencySupportUsPerState[i][k]
+ < mode_lib->vba.UrgentLatency / 1.0) {
+ mode_lib->vba.UrgentLatencySupport[i] = false;
+ }
+ }
+ }
+ /*Prefetch Check*/
+
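+	/*
+	 * For each voltage state: count the DCC-active DPPs, project the
+	 * deep-sleep DCFCLK floor, size the VM/PTE and meta rows per plane,
+	 * build the prefetch schedule, and then check prefetch bandwidth,
+	 * prefetch VRatio and immediate-flip bandwidth against ReturnBWPerState.
+	 */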
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i] = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i] =
+ mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
+ + mode_lib->vba.NoOfDPP[i][k];
+ }
+ }
+ }
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep = 8.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep = dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.PixelClock[k] / 16.0);
+ if (mode_lib->vba.BytePerPixelInDETC[k] == 0.0) {
+ if (mode_lib->vba.VRatio[k] <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 64.0
+ * mode_lib->vba.HRatio[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 64.0
+ * mode_lib->vba.PSCL_FACTOR[k]
+ * mode_lib->vba.RequiredDPPCLK[i][k]);
+ }
+ } else {
+ if (mode_lib->vba.VRatio[k] <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 32.0
+ * mode_lib->vba.HRatio[k]
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0)
+ / 32.0
+ * mode_lib->vba.PSCL_FACTOR[k]
+ * mode_lib->vba.RequiredDPPCLK[i][k]);
+ }
+ if (mode_lib->vba.VRatio[k] / 2.0 <= 1.0) {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / 32.0
+ * mode_lib->vba.HRatio[k]
+ / 2.0
+ * mode_lib->vba.PixelClock[k]
+ / mode_lib->vba.NoOfDPP[i][k]);
+ } else {
+ mode_lib->vba.ProjectedDCFCLKDeepSleep =
+ dml_max(
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ 1.1
+ * dml_ceil(
+ mode_lib->vba.BytePerPixelInDETC[k],
+ 2.0)
+ / 32.0
+ * mode_lib->vba.PSCL_FACTOR_CHROMA[k]
+ * mode_lib->vba.RequiredDPPCLK[i][k]);
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.Read256BlockHeightY[k],
+ mode_lib->vba.Read256BlockWidthY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETY[k], 1.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k],
+ mode_lib->vba.ViewportHeight[k],
+ mode_lib->vba.SwathWidthYPerState[i][k],
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchY[k],
+ mode_lib->vba.DCCMetaPitchY[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &mode_lib->vba.MetaRowBytesY,
+ &mode_lib->vba.DPTEBytesPerRowY,
+ &mode_lib->vba.PTEBufferSizeNotExceededY[i][k],
+ &mode_lib->vba.dpte_row_height[k],
+ &mode_lib->vba.meta_row_height[k]);
+ mode_lib->vba.PrefetchLinesY[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.vtaps[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightYPerState[i][k],
+ mode_lib->vba.ViewportYStartY[k],
+ &mode_lib->vba.PrefillY[k],
+ &mode_lib->vba.MaxNumSwY[k]);
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8)) {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = CalculateVMAndRowBytes(
+ mode_lib,
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.Read256BlockHeightY[k],
+ mode_lib->vba.Read256BlockWidthY[k],
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.SurfaceTiling[k],
+ dml_ceil(mode_lib->vba.BytePerPixelInDETC[k], 2.0),
+ mode_lib->vba.SourceScan[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0,
+ mode_lib->vba.ViewportHeight[k] / 2.0,
+ mode_lib->vba.SwathWidthYPerState[i][k] / 2.0,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.VMMPageSize,
+ mode_lib->vba.PTEBufferSizeInRequests,
+ mode_lib->vba.PDEProcessingBufIn64KBReqs,
+ mode_lib->vba.PitchC[k],
+ 0.0,
+ &mode_lib->vba.MacroTileWidthC[k],
+ &mode_lib->vba.MetaRowBytesC,
+ &mode_lib->vba.DPTEBytesPerRowC,
+ &mode_lib->vba.PTEBufferSizeNotExceededC[i][k],
+ &mode_lib->vba.dpte_row_height_chroma[k],
+ &mode_lib->vba.meta_row_height_chroma[k]);
+ mode_lib->vba.PrefetchLinesC[k] = CalculatePrefetchSourceLines(
+ mode_lib,
+ mode_lib->vba.VRatio[k] / 2.0,
+ mode_lib->vba.VTAPsChroma[k],
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.SwathHeightCPerState[i][k],
+ mode_lib->vba.ViewportYStartC[k],
+ &mode_lib->vba.PrefillC[k],
+ &mode_lib->vba.MaxNumSwC[k]);
+ } else {
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameC = 0.0;
+ mode_lib->vba.MetaRowBytesC = 0.0;
+ mode_lib->vba.DPTEBytesPerRowC = 0.0;
+ mode_lib->vba.PrefetchLinesC[k] = 0.0;
+ mode_lib->vba.PTEBufferSizeNotExceededC[i][k] = true;
+ }
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrameY
+ + mode_lib->vba.PDEAndMetaPTEBytesPerFrameC;
+ mode_lib->vba.MetaRowBytes[k] = mode_lib->vba.MetaRowBytesY
+ + mode_lib->vba.MetaRowBytesC;
+ mode_lib->vba.DPTEBytesPerRow[k] = mode_lib->vba.DPTEBytesPerRowY
+ + mode_lib->vba.DPTEBytesPerRowC;
+ }
+ mode_lib->vba.ExtraLatency =
+ mode_lib->vba.UrgentRoundTripAndOutOfOrderLatencyPerState[i]
+ + (mode_lib->vba.TotalNumberOfActiveDPP[i]
+ * mode_lib->vba.PixelChunkSizeInKByte
+ + mode_lib->vba.TotalNumberOfDCCActiveDPP[i]
+ * mode_lib->vba.MetaChunkSize)
+ * 1024.0
+ / mode_lib->vba.ReturnBWPerState[i];
+ if (mode_lib->vba.VirtualMemoryEnable == true) {
+ mode_lib->vba.ExtraLatency = mode_lib->vba.ExtraLatency
+ + mode_lib->vba.TotalNumberOfActiveDPP[i]
+ * mode_lib->vba.PTEChunkSize * 1024.0
+ / mode_lib->vba.ReturnBWPerState[i];
+ }
+ mode_lib->vba.TimeCalc = 24.0 / mode_lib->vba.ProjectedDCFCLKDeepSleep;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ if (mode_lib->vba.WritebackEnable[k] == true) {
+ mode_lib->vba.WritebackDelay[i][k] =
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[k],
+ mode_lib->vba.WritebackHRatio[k],
+ mode_lib->vba.WritebackVRatio[k],
+ mode_lib->vba.WritebackLumaHTaps[k],
+ mode_lib->vba.WritebackLumaVTaps[k],
+ mode_lib->vba.WritebackChromaHTaps[k],
+ mode_lib->vba.WritebackChromaVTaps[k],
+ mode_lib->vba.WritebackDestinationWidth[k])
+ / mode_lib->vba.RequiredDISPCLK[i];
+ } else {
+ mode_lib->vba.WritebackDelay[i][k] = 0.0;
+ }
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[j] == k
+ && mode_lib->vba.WritebackEnable[j]
+ == true) {
+ mode_lib->vba.WritebackDelay[i][k] =
+ dml_max(
+ mode_lib->vba.WritebackDelay[i][k],
+ mode_lib->vba.WritebackLatency
+ + CalculateWriteBackDelay(
+ mode_lib->vba.WritebackPixelFormat[j],
+ mode_lib->vba.WritebackHRatio[j],
+ mode_lib->vba.WritebackVRatio[j],
+ mode_lib->vba.WritebackLumaHTaps[j],
+ mode_lib->vba.WritebackLumaVTaps[j],
+ mode_lib->vba.WritebackChromaHTaps[j],
+ mode_lib->vba.WritebackChromaVTaps[j],
+ mode_lib->vba.WritebackDestinationWidth[j])
+ / mode_lib->vba.RequiredDISPCLK[i]);
+ }
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ for (j = 0; j <= mode_lib->vba.NumberOfActivePlanes - 1; j++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j) {
+ mode_lib->vba.WritebackDelay[i][k] =
+ mode_lib->vba.WritebackDelay[i][j];
+ }
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.MaximumVStartup[k] =
+ mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k]
+ - dml_max(
+ 1.0,
+ dml_ceil(
+ mode_lib->vba.WritebackDelay[i][k]
+ / (mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]),
+ 1.0));
+ }
+ mode_lib->vba.TWait = CalculateTWait(
+ mode_lib->vba.PrefetchMode,
+ mode_lib->vba.DRAMClockChangeLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.SREnterPlusExitTime);
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.SwathWidthYPerState[i][k],
+ dml_ceil(
+ mode_lib->vba.BytePerPixelInDETY[k],
+ 1.0),
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
+ }
+ mode_lib->vba.IsErrorResult[i][k] =
+ CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.RequiredDPPCLK[i][k],
+ mode_lib->vba.RequiredDISPCLK[i],
+ mode_lib->vba.PixelClock[k],
+ mode_lib->vba.ProjectedDCFCLKDeepSleep,
+ mode_lib->vba.DSCDelayPerState[i][k],
+ mode_lib->vba.NoOfDPP[i][k],
+ mode_lib->vba.ScalerEnabled[k],
+ mode_lib->vba.NumberOfCursors[k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ mode_lib->vba.SwathWidthYPerState[i][k]
+ / mode_lib->vba.HRatio[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.VTotal[k]
+ - mode_lib->vba.VActive[k],
+ mode_lib->vba.HTotal[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ mode_lib->vba.MaximumVStartup[k],
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
+ mode_lib->vba.MetaRowBytes[k],
+ mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.PrefetchLinesY[k],
+ mode_lib->vba.SwathWidthYPerState[i][k],
+ mode_lib->vba.BytePerPixelInDETY[k],
+ mode_lib->vba.PrefillY[k],
+ mode_lib->vba.MaxNumSwY[k],
+ mode_lib->vba.PrefetchLinesC[k],
+ mode_lib->vba.BytePerPixelInDETC[k],
+ mode_lib->vba.PrefillC[k],
+ mode_lib->vba.MaxNumSwC[k],
+ mode_lib->vba.SwathHeightYPerState[i][k],
+ mode_lib->vba.SwathHeightCPerState[i][k],
+ mode_lib->vba.TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.Interlace[k],
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ mode_lib->vba.DSTXAfterScaler,
+ mode_lib->vba.DSTYAfterScaler,
+ &mode_lib->vba.LineTimesForPrefetch[k],
+ &mode_lib->vba.PrefetchBW[k],
+ &mode_lib->vba.LinesForMetaPTE[k],
+ &mode_lib->vba.LinesForMetaAndDPTERow[k],
+ &mode_lib->vba.VRatioPreY[i][k],
+ &mode_lib->vba.VRatioPreC[i][k],
+ &mode_lib->vba.RequiredPrefetchPixelDataBW[i][k],
+ &mode_lib->vba.VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &mode_lib->vba.Tno_bw[k],
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.cursor_bw[k] = mode_lib->vba.NumberOfCursors[k]
+ * mode_lib->vba.CursorWidth[k][0]
+ * mode_lib->vba.CursorBPP[k][0] / 8.0
+ / (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ * mode_lib->vba.VRatio[k];
+ }
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
+ mode_lib->vba.prefetch_vm_bw_valid = true;
+ mode_lib->vba.prefetch_row_bw_valid = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k] == 0.0) {
+ mode_lib->vba.prefetch_vm_bw[k] = 0.0;
+ } else if (mode_lib->vba.LinesForMetaPTE[k] > 0.0) {
+ mode_lib->vba.prefetch_vm_bw[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
+ / (mode_lib->vba.LinesForMetaPTE[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_vm_bw[k] = 0.0;
+ mode_lib->vba.prefetch_vm_bw_valid = false;
+ }
+ if (mode_lib->vba.MetaRowBytes[k] + mode_lib->vba.DPTEBytesPerRow[k]
+ == 0.0) {
+ mode_lib->vba.prefetch_row_bw[k] = 0.0;
+ } else if (mode_lib->vba.LinesForMetaAndDPTERow[k] > 0.0) {
+ mode_lib->vba.prefetch_row_bw[k] = (mode_lib->vba.MetaRowBytes[k]
+ + mode_lib->vba.DPTEBytesPerRow[k])
+ / (mode_lib->vba.LinesForMetaAndDPTERow[k]
+ * mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k]);
+ } else {
+ mode_lib->vba.prefetch_row_bw[k] = 0.0;
+ mode_lib->vba.prefetch_row_bw_valid = false;
+ }
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch =
+ mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max4(
+ mode_lib->vba.prefetch_vm_bw[k],
+ mode_lib->vba.prefetch_row_bw[k],
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]);
+ }
+ mode_lib->vba.PrefetchSupported[i] = true;
+ if (mode_lib->vba.MaximumReadBandwidthWithPrefetch
+ > mode_lib->vba.ReturnBWPerState[i]
+ || mode_lib->vba.prefetch_vm_bw_valid == false
+ || mode_lib->vba.prefetch_row_bw_valid == false) {
+ mode_lib->vba.PrefetchSupported[i] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.LineTimesForPrefetch[k] < 2.0
+ || mode_lib->vba.LinesForMetaPTE[k] >= 8.0
+ || mode_lib->vba.LinesForMetaAndDPTERow[k] >= 16.0
+ || mode_lib->vba.IsErrorResult[i][k] == true) {
+ mode_lib->vba.PrefetchSupported[i] = false;
+ }
+ }
+ mode_lib->vba.VRatioInPrefetchSupported[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.VRatioPreY[i][k] > 4.0
+ || mode_lib->vba.VRatioPreC[i][k] > 4.0
+ || mode_lib->vba.IsErrorResult[i][k] == true) {
+ mode_lib->vba.VRatioInPrefetchSupported[i] = false;
+ }
+ }
+ if (mode_lib->vba.PrefetchSupported[i] == true
+ && mode_lib->vba.VRatioInPrefetchSupported[i] == true) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.ReturnBWPerState[i];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.BandwidthAvailableForImmediateFlip =
+ mode_lib->vba.BandwidthAvailableForImmediateFlip
+ - mode_lib->vba.cursor_bw[k]
+ - dml_max(
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.PrefetchBW[k]);
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.ImmediateFlipBytes[k] = 0.0;
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.ImmediateFlipBytes[k] =
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k]
+ + mode_lib->vba.MetaRowBytes[k]
+ + mode_lib->vba.DPTEBytesPerRow[k];
+ }
+ }
+ mode_lib->vba.TotImmediateFlipBytes = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if ((mode_lib->vba.SourcePixelFormat[k] != dm_420_8
+ && mode_lib->vba.SourcePixelFormat[k] != dm_420_10)) {
+ mode_lib->vba.TotImmediateFlipBytes =
+ mode_lib->vba.TotImmediateFlipBytes
+ + mode_lib->vba.ImmediateFlipBytes[k];
+ }
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ CalculateFlipSchedule(
+ mode_lib,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.MaxPageTableLevels,
+ mode_lib->vba.VirtualMemoryEnable,
+ mode_lib->vba.BandwidthAvailableForImmediateFlip,
+ mode_lib->vba.TotImmediateFlipBytes,
+ mode_lib->vba.SourcePixelFormat[k],
+ mode_lib->vba.ImmediateFlipBytes[k],
+ mode_lib->vba.HTotal[k]
+ / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.VRatio[k],
+ mode_lib->vba.Tno_bw[k],
+ mode_lib->vba.PDEAndMetaPTEBytesPerFrame[k],
+ mode_lib->vba.MetaRowBytes[k],
+ mode_lib->vba.DPTEBytesPerRow[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.dpte_row_height[k],
+ mode_lib->vba.meta_row_height[k],
+ mode_lib->vba.qual_row_bw[k],
+ &mode_lib->vba.DestinationLinesToRequestVMInImmediateFlip[k],
+ &mode_lib->vba.DestinationLinesToRequestRowInImmediateFlip[k],
+ &mode_lib->vba.final_flip_bw[k],
+ &mode_lib->vba.ImmediateFlipSupportedForPipe[k]);
+ }
+ mode_lib->vba.total_dcn_read_bw_with_flip = 0.0;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.total_dcn_read_bw_with_flip =
+ mode_lib->vba.total_dcn_read_bw_with_flip
+ + mode_lib->vba.cursor_bw[k]
+ + dml_max3(
+ mode_lib->vba.prefetch_vm_bw[k],
+ mode_lib->vba.prefetch_row_bw[k],
+ mode_lib->vba.final_flip_bw[k]
+ + dml_max(
+ mode_lib->vba.ReadBandwidth[k],
+ mode_lib->vba.RequiredPrefetchPixelDataBW[i][k]));
+ }
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = true;
+ if (mode_lib->vba.total_dcn_read_bw_with_flip
+ > mode_lib->vba.ReturnBWPerState[i]) {
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.ImmediateFlipSupportedForPipe[k] == false) {
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
+ }
+ }
+ } else {
+ mode_lib->vba.ImmediateFlipSupportedForState[i] = false;
+ }
+ }
+ /*PTE Buffer Size Check*/
+
+ for (i = 0; i <= DC__VOLTAGE_STATES; i++) {
+ mode_lib->vba.PTEBufferSizeNotExceeded[i] = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.PTEBufferSizeNotExceededY[i][k] == false
+ || mode_lib->vba.PTEBufferSizeNotExceededC[i][k] == false) {
+ mode_lib->vba.PTEBufferSizeNotExceeded[i] = false;
+ }
+ }
+ }
+ /*Cursor Support Check*/
+
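+	/*
+	 * The cursor buffer (less one chunk) must hide at least the urgent
+	 * latency for every cursor, and 64bpp cursors additionally require
+	 * Cursor64BppSupport.
+	 */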
+ mode_lib->vba.CursorSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.CursorWidth[k][0] > 0.0) {
+ if (dml_floor(
+ dml_floor(
+ mode_lib->vba.CursorBufferSize
+ - mode_lib->vba.CursorChunkSize,
+ mode_lib->vba.CursorChunkSize) * 1024.0
+ / (mode_lib->vba.CursorWidth[k][0]
+ * mode_lib->vba.CursorBPP[k][0]
+ / 8.0),
+ 1.0)
+ * (mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k])
+ / mode_lib->vba.VRatio[k] < mode_lib->vba.UrgentLatency
+ || (mode_lib->vba.CursorBPP[k][0] == 64.0
+ && mode_lib->vba.Cursor64BppSupport == false)) {
+ mode_lib->vba.CursorSupport = false;
+ }
+ }
+ }
+ /*Valid Pitch Check*/
+
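+	/*
+	 * Luma, chroma and DCC meta pitches must already satisfy the alignment
+	 * implied by the viewport and macro-tile/256B block sizes; a pitch that
+	 * would have to grow only clears PitchSupport, it is never adjusted here.
+	 */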
+ mode_lib->vba.PitchSupport = true;
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.AlignedYPitch[k] = dml_ceil(
+ dml_max(mode_lib->vba.PitchY[k], mode_lib->vba.ViewportWidth[k]),
+ mode_lib->vba.MacroTileWidthY[k]);
+ if (mode_lib->vba.AlignedYPitch[k] > mode_lib->vba.PitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.DCCEnable[k] == true) {
+ mode_lib->vba.AlignedDCCMetaPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.DCCMetaPitchY[k],
+ mode_lib->vba.ViewportWidth[k]),
+ 64.0 * mode_lib->vba.Read256BlockWidthY[k]);
+ } else {
+ mode_lib->vba.AlignedDCCMetaPitch[k] = mode_lib->vba.DCCMetaPitchY[k];
+ }
+ if (mode_lib->vba.AlignedDCCMetaPitch[k] > mode_lib->vba.DCCMetaPitchY[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_32
+ && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_16
+ && mode_lib->vba.SourcePixelFormat[k] != dm_mono_8) {
+ mode_lib->vba.AlignedCPitch[k] = dml_ceil(
+ dml_max(
+ mode_lib->vba.PitchC[k],
+ mode_lib->vba.ViewportWidth[k] / 2.0),
+ mode_lib->vba.MacroTileWidthC[k]);
+ } else {
+ mode_lib->vba.AlignedCPitch[k] = mode_lib->vba.PitchC[k];
+ }
+ if (mode_lib->vba.AlignedCPitch[k] > mode_lib->vba.PitchC[k]) {
+ mode_lib->vba.PitchSupport = false;
+ }
+ }
+ /*Mode Support, Voltage State and SOC Configuration*/
+
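+	/*
+	 * A state is supported only when every per-state and global check above
+	 * passes.  The lowest supported voltage state is then selected (falling
+	 * back to the highest state when none qualify) and its clocks, DPP
+	 * counts and per-plane ODM/DSC settings are copied out as the result.
+	 */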
+ for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
+ if (mode_lib->vba.ScaleRatioAndTapsSupport == true
+ && mode_lib->vba.SourceFormatPixelAndScanSupport == true
+ && mode_lib->vba.ViewportSizeSupport[i] == true
+ && mode_lib->vba.BandwidthSupport[i] == true
+ && mode_lib->vba.DIOSupport[i] == true
+ && mode_lib->vba.NotEnoughDSCUnits[i] == false
+ && mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] == false
+ && mode_lib->vba.UrgentLatencySupport[i] == true
+ && mode_lib->vba.ROBSupport[i] == true
+ && mode_lib->vba.DISPCLK_DPPCLK_Support[i] == true
+ && mode_lib->vba.TotalAvailablePipesSupport[i] == true
+ && mode_lib->vba.NumberOfOTGSupport == true
+ && mode_lib->vba.WritebackModeSupport == true
+ && mode_lib->vba.WritebackLatencySupport == true
+ && mode_lib->vba.WritebackScaleRatioAndTapsSupport == true
+ && mode_lib->vba.CursorSupport == true
+ && mode_lib->vba.PitchSupport == true
+ && mode_lib->vba.PrefetchSupported[i] == true
+ && mode_lib->vba.VRatioInPrefetchSupported[i] == true
+ && mode_lib->vba.PTEBufferSizeNotExceeded[i] == true
+ && mode_lib->vba.NonsupportedDSCInputBPC == false) {
+ mode_lib->vba.ModeSupport[i] = true;
+ } else {
+ mode_lib->vba.ModeSupport[i] = false;
+ }
+ }
+ for (i = DC__VOLTAGE_STATES; i >= 0; i--) {
+ if (i == DC__VOLTAGE_STATES || mode_lib->vba.ModeSupport[i] == true) {
+ mode_lib->vba.VoltageLevel = i;
+ }
+ }
+ mode_lib->vba.DCFCLK = mode_lib->vba.DCFCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.DRAMSpeed = mode_lib->vba.DRAMSpeedPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.FabricClock = mode_lib->vba.FabricClockPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.SOCCLK = mode_lib->vba.SOCCLKPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.FabricAndDRAMBandwidth =
+ mode_lib->vba.FabricAndDRAMBandwidthPerState[mode_lib->vba.VoltageLevel];
+ mode_lib->vba.ImmediateFlipSupport =
+ mode_lib->vba.ImmediateFlipSupportedForState[mode_lib->vba.VoltageLevel];
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ mode_lib->vba.DPPPerPlane[k] = mode_lib->vba.NoOfDPP[mode_lib->vba.VoltageLevel][k];
+ }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ mode_lib->vba.ODMCombineEnabled[k] =
+ mode_lib->vba.ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
+ } else {
+ mode_lib->vba.ODMCombineEnabled[k] = 0;
+ }
+ mode_lib->vba.DSCEnabled[k] =
+ mode_lib->vba.RequiresDSC[mode_lib->vba.VoltageLevel][k];
+ mode_lib->vba.OutputBpp[k] =
+ mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k];
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
new file mode 100644
index 000000000000..4112409cd974
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -0,0 +1,598 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML2_DISPLAY_MODE_VBA_H__
+#define __DML2_DISPLAY_MODE_VBA_H__
+
+#include "dml_common_defs.h"
+
+struct display_mode_lib;
+
+void set_prefetch_mode(struct display_mode_lib *mode_lib,
+ bool cstate_en,
+ bool pstate_en,
+ bool ignore_viewport_pos,
+ bool immediate_flip_support);
+
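+/*
+ * Declares double get_<attr>(mode_lib, pipes, num_pipes): a getter that
+ * returns the named mode-level result for the given pipe configuration.
+ */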
+#define dml_get_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes)
+
+dml_get_attr_decl(clk_dcf_deepsleep);
+dml_get_attr_decl(wm_urgent);
+dml_get_attr_decl(wm_memory_trip);
+dml_get_attr_decl(wm_writeback_urgent);
+dml_get_attr_decl(wm_stutter_exit);
+dml_get_attr_decl(wm_stutter_enter_exit);
+dml_get_attr_decl(wm_dram_clock_change);
+dml_get_attr_decl(wm_writeback_dram_clock_change);
+dml_get_attr_decl(wm_xfc_underflow);
+dml_get_attr_decl(stutter_efficiency_no_vblank);
+dml_get_attr_decl(stutter_efficiency);
+dml_get_attr_decl(urgent_latency);
+dml_get_attr_decl(urgent_extra_latency);
+dml_get_attr_decl(nonurgent_latency);
+dml_get_attr_decl(dram_clock_change_latency);
+dml_get_attr_decl(dispclk_calculated);
+dml_get_attr_decl(total_data_read_bw);
+dml_get_attr_decl(return_bw);
+dml_get_attr_decl(tcalc);
+
+#define dml_get_pipe_attr_decl(attr) double get_##attr(struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, unsigned int num_pipes, unsigned int which_pipe)
+
+dml_get_pipe_attr_decl(dsc_delay);
+dml_get_pipe_attr_decl(dppclk_calculated);
+dml_get_pipe_attr_decl(dscclk_calculated);
+dml_get_pipe_attr_decl(min_ttu_vblank);
+dml_get_pipe_attr_decl(vratio_prefetch_l);
+dml_get_pipe_attr_decl(vratio_prefetch_c);
+dml_get_pipe_attr_decl(dst_x_after_scaler);
+dml_get_pipe_attr_decl(dst_y_after_scaler);
+dml_get_pipe_attr_decl(dst_y_per_vm_vblank);
+dml_get_pipe_attr_decl(dst_y_per_row_vblank);
+dml_get_pipe_attr_decl(dst_y_prefetch);
+dml_get_pipe_attr_decl(dst_y_per_vm_flip);
+dml_get_pipe_attr_decl(dst_y_per_row_flip);
+dml_get_pipe_attr_decl(xfc_transfer_delay);
+dml_get_pipe_attr_decl(xfc_precharge_delay);
+dml_get_pipe_attr_decl(xfc_remote_surface_flip_latency);
+dml_get_pipe_attr_decl(xfc_prefetch_margin);
+
+unsigned int get_vstartup_calculated(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes,
+ unsigned int which_pipe);
+
+double get_total_immediate_flip_bytes(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+double get_total_immediate_flip_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+double get_total_prefetch_bw(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+
+unsigned int dml_get_voltage_level(
+ struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *pipes,
+ unsigned int num_pipes);
+
+bool Calculate256BBlockSizes(
+ enum source_format_class SourcePixelFormat,
+ enum dm_swizzle_mode SurfaceTiling,
+ unsigned int BytePerPixelY,
+ unsigned int BytePerPixelC,
+ unsigned int *BlockHeight256BytesY,
+ unsigned int *BlockHeight256BytesC,
+ unsigned int *BlockWidth256BytesY,
+ unsigned int *BlockWidth256BytesC);
+
+
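+/*
+ * Flat working state for the VBA-style mode calculations: SOC bounding box
+ * and IP parameters, per-pipe/plane inputs, intermediates, and the computed
+ * watermark, clock and mode-support outputs, mostly sized by
+ * DC__NUM_DPP__MAX and DC__VOLTAGE_STATES.
+ */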
+struct vba_vars_st {
+ ip_params_st ip;
+ soc_bounding_box_st soc;
+
+ unsigned int MaximumMaxVStartupLines;
+ double cursor_bw[DC__NUM_DPP__MAX];
+ double meta_row_bw[DC__NUM_DPP__MAX];
+ double dpte_row_bw[DC__NUM_DPP__MAX];
+ double qual_row_bw[DC__NUM_DPP__MAX];
+ double WritebackDISPCLK;
+ double PSCL_THROUGHPUT_LUMA[DC__NUM_DPP__MAX];
+ double PSCL_THROUGHPUT_CHROMA[DC__NUM_DPP__MAX];
+ double DPPCLKUsingSingleDPPLuma;
+ double DPPCLKUsingSingleDPPChroma;
+ double DPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
+ double DISPCLKWithRamping;
+ double DISPCLKWithoutRamping;
+ double GlobalDPPCLK;
+ double DISPCLKWithRampingRoundedToDFSGranularity;
+ double DISPCLKWithoutRampingRoundedToDFSGranularity;
+ double MaxDispclkRoundedToDFSGranularity;
+ bool DCCEnabledAnyPlane;
+ double ReturnBandwidthToDCN;
+ unsigned int SwathWidthY[DC__NUM_DPP__MAX];
+ unsigned int SwathWidthSingleDPPY[DC__NUM_DPP__MAX];
+ double BytePerPixelDETY[DC__NUM_DPP__MAX];
+ double BytePerPixelDETC[DC__NUM_DPP__MAX];
+ double ReadBandwidthPlaneLuma[DC__NUM_DPP__MAX];
+ double ReadBandwidthPlaneChroma[DC__NUM_DPP__MAX];
+ unsigned int TotalActiveDPP;
+ unsigned int TotalDCCActiveDPP;
+ double UrgentRoundTripAndOutOfOrderLatency;
+ double DisplayPipeLineDeliveryTimeLuma[DC__NUM_DPP__MAX]; // WM
+ double DisplayPipeLineDeliveryTimeChroma[DC__NUM_DPP__MAX]; // WM
+ double LinesInDETY[DC__NUM_DPP__MAX]; // WM
+ double LinesInDETC[DC__NUM_DPP__MAX]; // WM
+ unsigned int LinesInDETYRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
+ unsigned int LinesInDETCRoundedDownToSwath[DC__NUM_DPP__MAX]; // WM
+ double FullDETBufferingTimeY[DC__NUM_DPP__MAX]; // WM
+ double FullDETBufferingTimeC[DC__NUM_DPP__MAX]; // WM
+ double MinFullDETBufferingTime;
+ double FrameTimeForMinFullDETBufferingTime;
+ double AverageReadBandwidthGBytePerSecond;
+ double PartOfBurstThatFitsInROB;
+ double StutterBurstTime;
+ //unsigned int NextPrefetchMode;
+ double VBlankTime;
+ double SmallestVBlank;
+ double DCFCLKDeepSleepPerPlane;
+ double EffectiveDETPlusLBLinesLuma;
+ double EffectiveDETPlusLBLinesChroma;
+ double UrgentLatencySupportUsLuma;
+ double UrgentLatencySupportUsChroma;
+ double UrgentLatencySupportUs[DC__NUM_DPP__MAX];
+ unsigned int DSCFormatFactor;
+ unsigned int BlockHeight256BytesY[DC__NUM_DPP__MAX];
+ unsigned int BlockHeight256BytesC[DC__NUM_DPP__MAX];
+ unsigned int BlockWidth256BytesY[DC__NUM_DPP__MAX];
+ unsigned int BlockWidth256BytesC[DC__NUM_DPP__MAX];
+ double VInitPreFillY[DC__NUM_DPP__MAX];
+ double VInitPreFillC[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwathY[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwathC[DC__NUM_DPP__MAX];
+ double PrefetchSourceLinesY[DC__NUM_DPP__MAX];
+ double PrefetchSourceLinesC[DC__NUM_DPP__MAX];
+ double PixelPTEBytesPerRow[DC__NUM_DPP__MAX];
+ double MetaRowByte[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height[DC__NUM_DPP__MAX];
+ unsigned int dpte_row_height_chroma[DC__NUM_DPP__MAX];
+ unsigned int meta_row_height[DC__NUM_DPP__MAX];
+ unsigned int meta_row_height_chroma[DC__NUM_DPP__MAX];
+
+ unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
+ unsigned int MaxVStartupLines[DC__NUM_DPP__MAX];
+ double WritebackDelay[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PrefetchModeSupported;
+ bool AllowDRAMClockChangeDuringVBlank[DC__NUM_DPP__MAX];
+ bool AllowDRAMSelfRefreshDuringVBlank[DC__NUM_DPP__MAX];
+ double RequiredPrefetchPixDataBW[DC__NUM_DPP__MAX];
+ double XFCRemoteSurfaceFlipDelay;
+ double TInitXFill;
+ double TslvChk;
+ double SrcActiveDrainRate;
+ double Tno_bw[DC__NUM_DPP__MAX];
+ bool ImmediateFlipSupported;
+
+ double prefetch_vm_bw[DC__NUM_DPP__MAX];
+ double prefetch_row_bw[DC__NUM_DPP__MAX];
+ bool ImmediateFlipSupportedForPipe[DC__NUM_DPP__MAX];
+ unsigned int VStartupLines;
+ double DisplayPipeLineDeliveryTimeLumaPrefetch[DC__NUM_DPP__MAX];
+ double DisplayPipeLineDeliveryTimeChromaPrefetch[DC__NUM_DPP__MAX];
+ unsigned int ActiveDPPs;
+ unsigned int LBLatencyHidingSourceLinesY;
+ unsigned int LBLatencyHidingSourceLinesC;
+ double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX];
+ double MinActiveDRAMClockChangeMargin;
+ double XFCSlaveVUpdateOffset[DC__NUM_DPP__MAX];
+ double XFCSlaveVupdateWidth[DC__NUM_DPP__MAX];
+ double XFCSlaveVReadyOffset[DC__NUM_DPP__MAX];
+ double InitFillLevel;
+ double FinalFillMargin;
+ double FinalFillLevel;
+ double RemainingFillLevel;
+ double TFinalxFill;
+
+
+ //
+ // SOC Bounding Box Parameters
+ //
+ double SRExitTime;
+ double SREnterPlusExitTime;
+ double UrgentLatency;
+ double WritebackLatency;
+ double PercentOfIdealDRAMAndFabricBWReceivedAfterUrgLatency;
+ double NumberOfChannels;
+ double DRAMChannelWidth;
+ double FabricDatapathToDCNDataReturn;
+ double ReturnBusWidth;
+ double Downspreading;
+ double DISPCLKDPPCLKDSCCLKDownSpreading;
+ double DISPCLKDPPCLKVCOSpeed;
+ double RoundTripPingLatencyCycles;
+ double UrgentOutOfOrderReturnPerChannel;
+ unsigned int VMMPageSize;
+ double DRAMClockChangeLatency;
+ double XFCBusTransportTime;
+ double XFCXBUFLatencyTolerance;
+
+ //
+ // IP Parameters
+ //
+ unsigned int ROBBufferSizeInKByte;
+ double DETBufferSizeInKByte;
+ unsigned int DPPOutputBufferPixels;
+ unsigned int OPPOutputBufferLines;
+ unsigned int PixelChunkSizeInKByte;
+ double ReturnBW;
+ bool VirtualMemoryEnable;
+ unsigned int MaxPageTableLevels;
+ unsigned int OverridePageTableLevels;
+ unsigned int PTEChunkSize;
+ unsigned int MetaChunkSize;
+ unsigned int WritebackChunkSize;
+ bool ODMCapability;
+ unsigned int NumberOfDSC;
+ unsigned int LineBufferSize;
+ unsigned int MaxLineBufferLines;
+ unsigned int WritebackInterfaceLumaBufferSize;
+ unsigned int WritebackInterfaceChromaBufferSize;
+ unsigned int WritebackChromaLineBufferWidth;
+ double MaxDCHUBToPSCLThroughput;
+ double MaxPSCLToLBThroughput;
+ unsigned int PTEBufferSizeInRequests;
+ double DISPCLKRampingMargin;
+ unsigned int MaxInterDCNTileRepeaters;
+ bool XFCSupported;
+ double XFCSlvChunkSize;
+ double XFCFillBWOverhead;
+ double XFCFillConstant;
+ double XFCTSlvVupdateOffset;
+ double XFCTSlvVupdateWidth;
+ double XFCTSlvVreadyOffset;
+ double DPPCLKDelaySubtotal;
+ double DPPCLKDelaySCL;
+ double DPPCLKDelaySCLLBOnly;
+ double DPPCLKDelayCNVCFormater;
+ double DPPCLKDelayCNVCCursor;
+ double DISPCLKDelaySubtotal;
+ bool ProgressiveToInterlaceUnitInOPP;
+ unsigned int PDEProcessingBufIn64KBReqs;
+
+ // Pipe/Plane Parameters
+ int VoltageLevel;
+ double FabricAndDRAMBandwidth;
+ double FabricClock;
+ double DRAMSpeed;
+ double DISPCLK;
+ double SOCCLK;
+ double DCFCLK;
+
+ unsigned int NumberOfActivePlanes;
+ unsigned int ViewportWidth[DC__NUM_DPP__MAX];
+ unsigned int ViewportHeight[DC__NUM_DPP__MAX];
+ unsigned int ViewportYStartY[DC__NUM_DPP__MAX];
+ unsigned int ViewportYStartC[DC__NUM_DPP__MAX];
+ unsigned int PitchY[DC__NUM_DPP__MAX];
+ unsigned int PitchC[DC__NUM_DPP__MAX];
+ double HRatio[DC__NUM_DPP__MAX];
+ double VRatio[DC__NUM_DPP__MAX];
+ unsigned int htaps[DC__NUM_DPP__MAX];
+ unsigned int vtaps[DC__NUM_DPP__MAX];
+ unsigned int HTAPsChroma[DC__NUM_DPP__MAX];
+ unsigned int VTAPsChroma[DC__NUM_DPP__MAX];
+ unsigned int HTotal[DC__NUM_DPP__MAX];
+ unsigned int VTotal[DC__NUM_DPP__MAX];
+ unsigned int DPPPerPlane[DC__NUM_DPP__MAX];
+ double PixelClock[DC__NUM_DPP__MAX];
+ double PixelClockBackEnd[DC__NUM_DPP__MAX];
+ double DPPCLK[DC__NUM_DPP__MAX];
+ bool DCCEnable[DC__NUM_DPP__MAX];
+ unsigned int DCCMetaPitchY[DC__NUM_DPP__MAX];
+ enum scan_direction_class SourceScan[DC__NUM_DPP__MAX];
+ enum source_format_class SourcePixelFormat[DC__NUM_DPP__MAX];
+ bool WritebackEnable[DC__NUM_DPP__MAX];
+ double WritebackDestinationWidth[DC__NUM_DPP__MAX];
+ double WritebackDestinationHeight[DC__NUM_DPP__MAX];
+ double WritebackSourceHeight[DC__NUM_DPP__MAX];
+ enum source_format_class WritebackPixelFormat[DC__NUM_DPP__MAX];
+ unsigned int WritebackLumaHTaps[DC__NUM_DPP__MAX];
+ unsigned int WritebackLumaVTaps[DC__NUM_DPP__MAX];
+ unsigned int WritebackChromaHTaps[DC__NUM_DPP__MAX];
+ unsigned int WritebackChromaVTaps[DC__NUM_DPP__MAX];
+ double WritebackHRatio[DC__NUM_DPP__MAX];
+ double WritebackVRatio[DC__NUM_DPP__MAX];
+ unsigned int HActive[DC__NUM_DPP__MAX];
+ unsigned int VActive[DC__NUM_DPP__MAX];
+ bool Interlace[DC__NUM_DPP__MAX];
+ enum dm_swizzle_mode SurfaceTiling[DC__NUM_DPP__MAX];
+ unsigned int ScalerRecoutWidth[DC__NUM_DPP__MAX];
+ bool DynamicMetadataEnable[DC__NUM_DPP__MAX];
+ unsigned int DynamicMetadataLinesBeforeActiveRequired[DC__NUM_DPP__MAX];
+ unsigned int DynamicMetadataTransmittedBytes[DC__NUM_DPP__MAX];
+ double DCCRate[DC__NUM_DPP__MAX];
+ bool ODMCombineEnabled[DC__NUM_DPP__MAX];
+ double OutputBpp[DC__NUM_DPP__MAX];
+ unsigned int NumberOfDSCSlices[DC__NUM_DPP__MAX];
+ bool DSCEnabled[DC__NUM_DPP__MAX];
+ unsigned int DSCDelay[DC__NUM_DPP__MAX];
+ unsigned int DSCInputBitPerComponent[DC__NUM_DPP__MAX];
+ enum output_format_class OutputFormat[DC__NUM_DPP__MAX];
+ enum output_encoder_class Output[DC__NUM_DPP__MAX];
+ unsigned int BlendingAndTiming[DC__NUM_DPP__MAX];
+ bool SynchronizedVBlank;
+ unsigned int NumberOfCursors[DC__NUM_DPP__MAX];
+ unsigned int CursorWidth[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
+ unsigned int CursorBPP[DC__NUM_DPP__MAX][DC__NUM_CURSOR__MAX];
+ bool XFCEnabled[DC__NUM_DPP__MAX];
+ bool ScalerEnabled[DC__NUM_DPP__MAX];
+
+ // Intermediates/Informational
+ bool ImmediateFlipSupport;
+ unsigned int SwathHeightY[DC__NUM_DPP__MAX];
+ unsigned int SwathHeightC[DC__NUM_DPP__MAX];
+ unsigned int DETBufferSizeY[DC__NUM_DPP__MAX];
+ unsigned int DETBufferSizeC[DC__NUM_DPP__MAX];
+ unsigned int LBBitPerPixel[DC__NUM_DPP__MAX];
+ double LastPixelOfLineExtraWatermark;
+ double TotalDataReadBandwidth;
+ unsigned int TotalActiveWriteback;
+ unsigned int EffectiveLBLatencyHidingSourceLinesLuma;
+ unsigned int EffectiveLBLatencyHidingSourceLinesChroma;
+ double BandwidthAvailableForImmediateFlip;
+ unsigned int PrefetchMode;
+ bool IgnoreViewportPositioning;
+ double PrefetchBandwidth[DC__NUM_DPP__MAX];
+ bool ErrorResult[DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesFrame[DC__NUM_DPP__MAX];
+
+ //
+	// Calculated Outputs
+ //
+ double DCFClkDeepSleep;
+ double UrgentWatermark;
+ double UrgentExtraLatency;
+ double MemoryTripWatermark;
+ double WritebackUrgentWatermark;
+ double StutterExitWatermark;
+ double StutterEnterPlusExitWatermark;
+ double DRAMClockChangeWatermark;
+ double WritebackDRAMClockChangeWatermark;
+ double StutterEfficiency;
+ double StutterEfficiencyNotIncludingVBlank;
+ double MinUrgentLatencySupportUs;
+ double NonUrgentLatencyTolerance;
+ double MinActiveDRAMClockChangeLatencySupported;
+ enum clock_change_support DRAMClockChangeSupport;
+
+	// These are the clocks calculated by the library but they are not actually
+	// used explicitly. They are fetched by tests and then possibly used. The
+	// ultimate values to use are the ones specified by the parameters to DML.
+ double DISPCLK_calculated;
+ double DSCCLK_calculated[DC__NUM_DPP__MAX];
+ double DPPCLK_calculated[DC__NUM_DPP__MAX];
+
+ unsigned int VStartup[DC__NUM_DPP__MAX];
+ unsigned int VUpdateOffsetPix[DC__NUM_DPP__MAX];
+ unsigned int VUpdateWidthPix[DC__NUM_DPP__MAX];
+ unsigned int VReadyOffsetPix[DC__NUM_DPP__MAX];
+ unsigned int VStartupRequiredWhenNotEnoughTimeForDynamicMetadata;
+
+ double ImmediateFlipBW;
+ unsigned int TotImmediateFlipBytes;
+ double TCalc;
+ double MinTTUVBlank[DC__NUM_DPP__MAX];
+ double VRatioPrefetchY[DC__NUM_DPP__MAX];
+ double VRatioPrefetchC[DC__NUM_DPP__MAX];
+ double DSTXAfterScaler[DC__NUM_DPP__MAX];
+ double DSTYAfterScaler[DC__NUM_DPP__MAX];
+
+ double DestinationLinesToRequestVMInVBlank[DC__NUM_DPP__MAX];
+ double DestinationLinesToRequestRowInVBlank[DC__NUM_DPP__MAX];
+ double DestinationLinesForPrefetch[DC__NUM_DPP__MAX];
+ double DestinationLinesToRequestRowInImmediateFlip[DC__NUM_DPP__MAX];
+ double DestinationLinesToRequestVMInImmediateFlip[DC__NUM_DPP__MAX];
+
+ double XFCTransferDelay[DC__NUM_DPP__MAX];
+ double XFCPrechargeDelay[DC__NUM_DPP__MAX];
+ double XFCRemoteSurfaceFlipLatency[DC__NUM_DPP__MAX];
+ double XFCPrefetchMargin[DC__NUM_DPP__MAX];
+
+ display_e2e_pipe_params_st cache_pipes[DC__NUM_DPP__MAX];
+ unsigned int cache_num_pipes;
+ unsigned int pipe_plane[DC__NUM_DPP__MAX];
+
+ /* vba mode support */
+ /*inputs*/
+ bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
+ double MaxHSCLRatio;
+ double MaxVSCLRatio;
+ unsigned int MaxNumWriteback;
+ bool WritebackLumaAndChromaScalingSupported;
+ bool Cursor64BppSupport;
+ double DCFCLKPerState[DC__VOLTAGE_STATES + 1];
+ double FabricClockPerState[DC__VOLTAGE_STATES + 1];
+ double SOCCLKPerState[DC__VOLTAGE_STATES + 1];
+ double PHYCLKPerState[DC__VOLTAGE_STATES + 1];
+ double MaxDppclk[DC__VOLTAGE_STATES + 1];
+ double MaxDSCCLK[DC__VOLTAGE_STATES + 1];
+ double DRAMSpeedPerState[DC__VOLTAGE_STATES + 1];
+ double MaxDispclk[DC__VOLTAGE_STATES + 1];
+
+ /*outputs*/
+ bool ScaleRatioAndTapsSupport;
+ bool SourceFormatPixelAndScanSupport;
+ unsigned int SwathWidthYSingleDPP[DC__NUM_DPP__MAX];
+ double BytePerPixelInDETY[DC__NUM_DPP__MAX];
+ double BytePerPixelInDETC[DC__NUM_DPP__MAX];
+ double TotalReadBandwidthConsumedGBytePerSecond;
+ double ReadBandwidth[DC__NUM_DPP__MAX];
+ double TotalWriteBandwidthConsumedGBytePerSecond;
+ double WriteBandwidth[DC__NUM_DPP__MAX];
+ double TotalBandwidthConsumedGBytePerSecond;
+ bool DCCEnabledInAnyPlane;
+ bool WritebackLatencySupport;
+ bool WritebackModeSupport;
+ bool Writeback10bpc420Supported;
+ bool BandwidthSupport[DC__VOLTAGE_STATES + 1];
+ unsigned int TotalNumberOfActiveWriteback;
+ double CriticalPoint;
+ double ReturnBWToDCNPerState;
+ double FabricAndDRAMBandwidthPerState[DC__VOLTAGE_STATES + 1];
+ double ReturnBWPerState[DC__VOLTAGE_STATES + 1];
+ double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES + 1];
+ bool ODMCombineEnablePerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PTEBufferSizeNotExceededY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PTEBufferSizeNotExceededC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool PrefetchSupported[DC__VOLTAGE_STATES + 1];
+ bool VRatioInPrefetchSupported[DC__VOLTAGE_STATES + 1];
+ bool DISPCLK_DPPCLK_Support[DC__VOLTAGE_STATES + 1];
+ bool TotalAvailablePipesSupport[DC__VOLTAGE_STATES + 1];
+ bool UrgentLatencySupport[DC__VOLTAGE_STATES + 1];
+ bool ModeSupport[DC__VOLTAGE_STATES + 1];
+ bool DIOSupport[DC__VOLTAGE_STATES + 1];
+ bool NotEnoughDSCUnits[DC__VOLTAGE_STATES + 1];
+ bool DSCCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES + 1];
+ bool ROBSupport[DC__VOLTAGE_STATES + 1];
+ bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES + 1];
+ bool RequiresDSC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool IsErrorResult[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ bool ViewportSizeSupport[DC__VOLTAGE_STATES + 1];
+ bool prefetch_vm_bw_valid;
+ bool prefetch_row_bw_valid;
+ bool NumberOfOTGSupport;
+ bool NonsupportedDSCInputBPC;
+ bool WritebackScaleRatioAndTapsSupport;
+ bool CursorSupport;
+ bool PitchSupport;
+
+ double WritebackLineBufferLumaBufferSize;
+ double WritebackLineBufferChromaBufferSize;
+ double WritebackMinHSCLRatio;
+ double WritebackMinVSCLRatio;
+ double WritebackMaxHSCLRatio;
+ double WritebackMaxVSCLRatio;
+ double WritebackMaxHSCLTaps;
+ double WritebackMaxVSCLTaps;
+ unsigned int MaxNumDPP;
+ unsigned int MaxNumOTG;
+ double CursorBufferSize;
+ double CursorChunkSize;
+ unsigned int Mode;
+ unsigned int NoOfDPP[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double OutputLinkDPLanes[DC__NUM_DPP__MAX];
+ double SwathWidthYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double SwathHeightYPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double SwathHeightCPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double UrgentLatencySupportUsPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double VRatioPreY[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double VRatioPreC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double RequiredPrefetchPixelDataBW[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double RequiredDPPCLK[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double RequiredDISPCLK[DC__VOLTAGE_STATES + 1];
+ double TotalNumberOfActiveDPP[DC__VOLTAGE_STATES + 1];
+ double TotalNumberOfDCCActiveDPP[DC__VOLTAGE_STATES + 1];
+ double PrefetchBW[DC__NUM_DPP__MAX];
+ double PDEAndMetaPTEBytesPerFrame[DC__NUM_DPP__MAX];
+ double MetaRowBytes[DC__NUM_DPP__MAX];
+ double DPTEBytesPerRow[DC__NUM_DPP__MAX];
+ double PrefetchLinesY[DC__NUM_DPP__MAX];
+ double PrefetchLinesC[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwY[DC__NUM_DPP__MAX];
+ unsigned int MaxNumSwC[DC__NUM_DPP__MAX];
+ double PrefillY[DC__NUM_DPP__MAX];
+ double PrefillC[DC__NUM_DPP__MAX];
+ double LineTimesForPrefetch[DC__NUM_DPP__MAX];
+ double LinesForMetaPTE[DC__NUM_DPP__MAX];
+ double LinesForMetaAndDPTERow[DC__NUM_DPP__MAX];
+ double MinDPPCLKUsingSingleDPP[DC__NUM_DPP__MAX];
+ double RequiresFEC[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ unsigned int OutputBppPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ double DSCDelayPerState[DC__VOLTAGE_STATES + 1][DC__NUM_DPP__MAX];
+ unsigned int Read256BlockHeightY[DC__NUM_DPP__MAX];
+ unsigned int Read256BlockWidthY[DC__NUM_DPP__MAX];
+ unsigned int Read256BlockHeightC[DC__NUM_DPP__MAX];
+ unsigned int Read256BlockWidthC[DC__NUM_DPP__MAX];
+ unsigned int ImmediateFlipBytes[DC__NUM_DPP__MAX];
+ double MaxSwathHeightY[DC__NUM_DPP__MAX];
+ double MaxSwathHeightC[DC__NUM_DPP__MAX];
+ double MinSwathHeightY[DC__NUM_DPP__MAX];
+ double MinSwathHeightC[DC__NUM_DPP__MAX];
+ double PSCL_FACTOR[DC__NUM_DPP__MAX];
+ double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
+ double MaximumVStartup[DC__NUM_DPP__MAX];
+ double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
+ double AlignedYPitch[DC__NUM_DPP__MAX];
+ double AlignedCPitch[DC__NUM_DPP__MAX];
+ double MaximumSwathWidth[DC__NUM_DPP__MAX];
+ double final_flip_bw[DC__NUM_DPP__MAX];
+ double ImmediateFlipSupportedForState[DC__VOLTAGE_STATES + 1];
+
+ double WritebackLumaVExtra;
+ double WritebackChromaVExtra;
+ double WritebackRequiredDISPCLK;
+ double MaximumSwathWidthSupport;
+ double MaximumSwathWidthInDETBuffer;
+ double MaximumSwathWidthInLineBuffer;
+ double MaxDispclkRoundedDownToDFSGranularity;
+ double MaxDppclkRoundedDownToDFSGranularity;
+ double PlaneRequiredDISPCLKWithoutODMCombine;
+ double PlaneRequiredDISPCLK;
+ double TotalNumberOfActiveOTG;
+ double FECOverhead;
+ double EffectiveFECOverhead;
+ unsigned int Outbpp;
+ unsigned int OutbppDSC;
+ double TotalDSCUnitsRequired;
+ double bpp;
+ unsigned int slices;
+ double SwathWidthGranularityY;
+ double RoundedUpMaxSwathSizeBytesY;
+ double SwathWidthGranularityC;
+ double RoundedUpMaxSwathSizeBytesC;
+ double LinesInDETLuma;
+ double LinesInDETChroma;
+ double EffectiveDETLBLinesLuma;
+ double EffectiveDETLBLinesChroma;
+ double ProjectedDCFCLKDeepSleep;
+ double PDEAndMetaPTEBytesPerFrameY;
+ double PDEAndMetaPTEBytesPerFrameC;
+ unsigned int MetaRowBytesY;
+ unsigned int MetaRowBytesC;
+ unsigned int DPTEBytesPerRowC;
+ unsigned int DPTEBytesPerRowY;
+ double ExtraLatency;
+ double TimeCalc;
+ double TWait;
+ double MaximumReadBandwidthWithPrefetch;
+ double total_dcn_read_bw_with_flip;
+};
+
+#endif /* __DML2_DISPLAY_MODE_VBA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
new file mode 100644
index 000000000000..8ba962df42e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.c
@@ -0,0 +1,1763 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_mode_lib.h"
+#include "display_mode_vba.h"
+#include "display_rq_dlg_calc.h"
+
+static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp);
+
+#include "dml_inline_defs.h"
+
+static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+{
+ unsigned int ret_val = 0;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+ ret_val = 2;
+ } else if (source_format == dm_444_32) {
+ if (!is_chroma)
+ ret_val = 4;
+ } else if (source_format == dm_444_64) {
+ if (!is_chroma)
+ ret_val = 8;
+ } else if (source_format == dm_420_8) {
+ if (is_chroma)
+ ret_val = 2;
+ else
+ ret_val = 1;
+ } else if (source_format == dm_420_10) {
+ if (is_chroma)
+ ret_val = 4;
+ else
+ ret_val = 2;
+ } else if (source_format == dm_444_8) {
+ ret_val = 1;
+ }
+ return ret_val;
+}
+
+static bool is_dual_plane(enum source_format_class source_format)
+{
+	bool ret_val = false;
+
+	if ((source_format == dm_420_8) || (source_format == dm_420_10))
+		ret_val = true;
+
+	return ret_val;
+}
+
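+/*
+ * Refclk cycles per request delivery: with vratio <= 1 delivery is paced by
+ * the recout width at pixel rate (half of hactive when ODM combine is used),
+ * otherwise by the scaler's horizontal pixel rate; both are then divided by
+ * the number of requests per swath.
+ */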
+static double get_refcyc_per_delivery(struct display_mode_lib *mode_lib,
+ double refclk_freq_in_mhz,
+ double pclk_freq_in_mhz,
+ bool odm_combine,
+ unsigned int recout_width,
+ unsigned int hactive,
+ double vratio,
+ double hscale_pixel_rate,
+ unsigned int delivery_width,
+ unsigned int req_per_swath_ub)
+{
+ double refcyc_per_delivery = 0.0;
+
+ if (vratio <= 1.0) {
+ if (odm_combine)
+ refcyc_per_delivery = (double) refclk_freq_in_mhz
+ * dml_min((double) recout_width, (double) hactive / 2.0)
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ else
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ } else {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
+ / (double) hscale_pixel_rate / (double) req_per_swath_ub;
+ }
+
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: recout_width = %d\n", __func__, recout_width);
+ dml_print("DML_DLG: %s: vratio = %3.2f\n", __func__, vratio);
+ dml_print("DML_DLG: %s: req_per_swath_ub = %d\n", __func__, req_per_swath_ub);
+ dml_print("DML_DLG: %s: refcyc_per_delivery= %3.2f\n", __func__, refcyc_per_delivery);
+
+ return refcyc_per_delivery;
+
+}
+
+static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
+{
+ if (tile_size == dm_256k_tile)
+ return (256 * 1024);
+ else if (tile_size == dm_64k_tile)
+ return (64 * 1024);
+ else
+ return (4 * 1024);
+}
+
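+/*
+ * Convert the request sizing parameters from byte counts to the log2-encoded
+ * register fields (with their hardware-defined offsets), leaving the minimum
+ * sizes at 0 when the corresponding byte count is 0.
+ */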
+static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
+ display_data_rq_regs_st *rq_regs,
+ const display_data_rq_sizing_params_st rq_sizing)
+{
+ dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
+ print__data_rq_sizing_params_st(mode_lib, rq_sizing);
+
+ rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+
+ if (rq_sizing.min_chunk_bytes == 0)
+ rq_regs->min_chunk_size = 0;
+ else
+ rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
+ if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->min_meta_chunk_size = 0;
+ else
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+
+ rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+}
+
+static void extract_rq_regs(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_rq_params_st rq_param)
+{
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+ unsigned int detile_buf_plane1_addr = 0;
+
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ 1) - 3;
+
+ if (rq_param.yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ 1) - 3;
+ }
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+
+ // FIXME: take the max between luma, chroma chunk size?
+ // okay for now, as we are setting chunk_bytes to 8kb anyways
+ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ rq_regs->drq_expansion_mode = 0;
+ } else {
+ rq_regs->drq_expansion_mode = 2;
+ }
+ rq_regs->prq_expansion_mode = 1;
+ rq_regs->mrq_expansion_mode = 1;
+ rq_regs->crq_expansion_mode = 1;
+
+ if (rq_param.yuv420) {
+ if ((double) rq_param.misc.rq_l.stored_swath_bytes
+ / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
+ } else {
+ detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
+ 256,
+ 0) / 64.0; // 2/3 to chroma
+ }
+ }
+ rq_regs->plane1_base_address = detile_buf_plane1_addr;
+}
+
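+/*
+ * Decide how the detile buffer is shared between luma and chroma: if both
+ * full swaths (doubled) fit, keep full 256B requests; otherwise drop the
+ * luma plane to 128B requests.  The swath height then follows from the 256B
+ * block dimensions, the scan direction and the chosen request size.
+ */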
+static void handle_det_buf_split(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ unsigned int total_swath_bytes = 0;
+ unsigned int swath_bytes_l = 0;
+ unsigned int swath_bytes_c = 0;
+ unsigned int full_swath_bytes_packed_l = 0;
+ unsigned int full_swath_bytes_packed_c = 0;
+ bool req128_l = 0;
+ bool req128_c = 0;
+ bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ unsigned int log2_swath_height_l = 0;
+ unsigned int log2_swath_height_c = 0;
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+
+ full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
+ full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
+
+ if (rq_param->yuv420_10bpc) {
+ full_swath_bytes_packed_l = dml_round_to_multiple(rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ full_swath_bytes_packed_c = dml_round_to_multiple(rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ }
+
+ if (rq_param->yuv420) {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request
+ req128_l = 0;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ } else { //128b request (for luma only for yuv420 8bpc)
+ req128_l = 1;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ }
+		// Note: assumption is that the config passed in will fit into
+		// the detile buffer.
+ } else {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes)
+ req128_l = 0;
+ else
+ req128_l = 1;
+
+ swath_bytes_l = total_swath_bytes;
+ swath_bytes_c = 0;
+ }
+ rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
+ rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
+
+ if (surf_linear) {
+ log2_swath_height_l = 0;
+ log2_swath_height_c = 0;
+ } else if (!surf_vert) {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ } else {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ }
+ rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
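+ // a 128-byte request (req128) drops the swath height by one power of two so
+ // that the stored swaths still fit in the detile buffer; the factor of two on
+ // full_swath_bytes above presumably accounts for double buffering.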
+
+ dml_print("DML_DLG: %s: req128_l = %0d\n", __func__, req128_l);
+ dml_print("DML_DLG: %s: req128_c = %0d\n", __func__, req128_c);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_l = %0d\n",
+ __func__,
+ full_swath_bytes_packed_l);
+ dml_print("DML_DLG: %s: full_swath_bytes_packed_c = %0d\n",
+ __func__,
+ full_swath_bytes_packed_c);
+}
+
+static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ unsigned int vp_width,
+ unsigned int vp_height,
+ unsigned int data_pitch,
+ unsigned int meta_pitch,
+ unsigned int source_format,
+ unsigned int tiling,
+ unsigned int macro_tile_size,
+ unsigned int source_scan,
+ unsigned int is_chroma)
+{
+ bool surf_linear = (tiling == dm_sw_linear);
+ bool surf_vert = (source_scan == dm_vert);
+
+ unsigned int bytes_per_element;
+ unsigned int bytes_per_element_y = get_bytes_per_element((enum source_format_class)(source_format),
+ false);
+ unsigned int bytes_per_element_c = get_bytes_per_element((enum source_format_class)(source_format),
+ true);
+
+ unsigned int blk256_width = 0;
+ unsigned int blk256_height = 0;
+
+ unsigned int blk256_width_y = 0;
+ unsigned int blk256_height_y = 0;
+ unsigned int blk256_width_c = 0;
+ unsigned int blk256_height_c = 0;
+ unsigned int log2_bytes_per_element;
+ unsigned int log2_blk256_width;
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int meta_req_width;
+ unsigned int meta_req_height;
+ unsigned int log2_meta_row_height;
+ unsigned int meta_row_width_ub;
+ unsigned int log2_meta_chunk_bytes;
+ unsigned int log2_meta_chunk_height;
+
+ // full-sized meta chunk width in units of data elements
+ unsigned int log2_meta_chunk_width;
+ unsigned int log2_min_meta_chunk_bytes;
+ unsigned int min_meta_chunk_width;
+ unsigned int meta_chunk_width;
+ unsigned int meta_chunk_per_row_int;
+ unsigned int meta_row_remainder;
+ unsigned int meta_chunk_threshold;
+ unsigned int meta_blk_bytes;
+ unsigned int meta_blk_height;
+ unsigned int meta_blk_width;
+ unsigned int meta_surface_bytes;
+ unsigned int vmpg_bytes;
+ unsigned int meta_pte_req_per_frame_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ const unsigned int log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ const unsigned int dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
+ const unsigned int pde_proc_buffer_size_64k_reqs =
+ mode_lib->ip.pde_proc_buffer_size_64k_reqs;
+
+ unsigned int log2_vmpg_height = 0;
+ unsigned int log2_vmpg_width = 0;
+ unsigned int log2_dpte_req_height_ptes = 0;
+ unsigned int log2_dpte_req_height = 0;
+ unsigned int log2_dpte_req_width = 0;
+ unsigned int log2_dpte_row_height_linear = 0;
+ unsigned int log2_dpte_row_height = 0;
+ unsigned int log2_dpte_group_width = 0;
+ unsigned int dpte_row_width_ub = 0;
+ unsigned int dpte_req_height = 0;
+ unsigned int dpte_req_width = 0;
+ unsigned int dpte_group_width = 0;
+ unsigned int log2_dpte_group_bytes = 0;
+ unsigned int log2_dpte_group_length = 0;
+ unsigned int pde_buf_entries;
+ bool yuv420 = (source_format == dm_420_8 || source_format == dm_420_10);
+
+ Calculate256BBlockSizes((enum source_format_class)(source_format),
+ (enum dm_swizzle_mode)(tiling),
+ bytes_per_element_y,
+ bytes_per_element_c,
+ &blk256_height_y,
+ &blk256_height_c,
+ &blk256_width_y,
+ &blk256_width_c);
+
+ if (!is_chroma) {
+ blk256_width = blk256_width_y;
+ blk256_height = blk256_height_y;
+ bytes_per_element = bytes_per_element_y;
+ } else {
+ blk256_width = blk256_width_c;
+ blk256_height = blk256_height_c;
+ bytes_per_element = bytes_per_element_c;
+ }
+
+ log2_bytes_per_element = dml_log2(bytes_per_element);
+
+ dml_print("DML_DLG: %s: surf_linear = %d\n", __func__, surf_linear);
+ dml_print("DML_DLG: %s: surf_vert = %d\n", __func__, surf_vert);
+ dml_print("DML_DLG: %s: blk256_width = %d\n", __func__, blk256_width);
+ dml_print("DML_DLG: %s: blk256_height = %d\n", __func__, blk256_height);
+
+ log2_blk256_width = dml_log2((double) blk256_width);
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes = surf_linear ?
+ 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ // remember the log rules:
+ // "+" in log space is multiply
+ // "-" in log space is divide
+ // "/2" in log space is like a square root
+ // blk is vertically biased
+ if (tiling != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; // blk height of 1
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
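+ // worked example (assuming a 64KB macro tile with 4 bytes per element, where
+ // the 256B block is 8x8 elements): log2_blk_bytes = 16, log2_bytes_per_element = 2,
+ // log2_blk_height = 3 + ceil((16 - 8) / 2) = 7 and log2_blk_width = 16 - 2 - 7 = 7,
+ // i.e. a 128 x 128 element block (128 * 128 * 4B = 64KB).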
+
+ if (!surf_vert) {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ + blk256_width;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
+ } else {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_height - 1, blk256_height, 1)
+ + blk256_height;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
+ }
+
+ if (!surf_vert)
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
+ * bytes_per_element;
+ else
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
+ * bytes_per_element;
+
+ rq_misc_param->blk256_height = blk256_height;
+ rq_misc_param->blk256_width = blk256_width;
+
+ // -------
+ // meta
+ // -------
+ log2_meta_req_bytes = 6; // a meta request is 64B, i.e. an 8x8 block of byte-sized meta elements
+
+ // each 64B meta request for dcn is 8x8 meta elements and
+ // a meta element covers one 256b block of the data surface.
+ log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 bytes; each byte represents 1 blk256
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ meta_req_width = 1 << log2_meta_req_width;
+ meta_req_height = 1 << log2_meta_req_height;
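+ // e.g. continuing the 4 bytes-per-element case (256B block of 8x8 elements):
+ // log2_meta_req_height = 3 + 3 = 6 and log2_meta_req_width = 6 + 8 - 2 - 6 = 6,
+ // so one 64B meta request covers a 64 x 64 element region (8x8 blocks of 256B).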
+ log2_meta_row_height = 0;
+ meta_row_width_ub = 0;
+
+ // the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ // calculate upper bound of the meta_row_width
+ if (!surf_vert) {
+ log2_meta_row_height = log2_meta_req_height;
+ meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ + meta_req_width;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
+ } else {
+ log2_meta_row_height = log2_meta_req_width;
+ meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ + meta_req_height;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
+ }
+ rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
+
+ rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
+
+ log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
+ log2_meta_chunk_height = log2_meta_row_height;
+
+ // full-sized meta chunk width in units of data elements
+ log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height;
+ log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
+ min_meta_chunk_width = 1
+ << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height);
+ meta_chunk_width = 1 << log2_meta_chunk_width;
+ meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
+ meta_row_remainder = meta_row_width_ub % meta_chunk_width;
+ meta_chunk_threshold = 0;
+ meta_blk_bytes = 4096;
+ meta_blk_height = blk256_height * 64;
+ meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
+ meta_surface_bytes = meta_pitch
+ * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1) + meta_blk_height)
+ * bytes_per_element / 256;
+ vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
+ meta_pte_req_per_frame_ub = (dml_round_to_multiple(meta_surface_bytes - vmpg_bytes,
+ 8 * vmpg_bytes,
+ 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
+ meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; //64B mpte request
+ rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
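+ // a 64B meta pte request returns 8 ptes, each presumably mapping one vm page of
+ // the meta surface, so one request covers 8 * vmpg_bytes; the rounding above
+ // gives an upper bound on the requests (and bytes) needed per frame.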
+
+ dml_print("DML_DLG: %s: meta_blk_height = %d\n", __func__, meta_blk_height);
+ dml_print("DML_DLG: %s: meta_blk_width = %d\n", __func__, meta_blk_width);
+ dml_print("DML_DLG: %s: meta_surface_bytes = %d\n", __func__, meta_surface_bytes);
+ dml_print("DML_DLG: %s: meta_pte_req_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_req_per_frame_ub);
+ dml_print("DML_DLG: %s: meta_pte_bytes_per_frame_ub = %d\n",
+ __func__,
+ meta_pte_bytes_per_frame_ub);
+
+ if (!surf_vert)
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
+ else
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
+
+ if (meta_row_remainder <= meta_chunk_threshold)
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ else
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+
+ // ------
+ // dpte
+ // ------
+ if (surf_linear) {
+ log2_vmpg_height = 0; // one line high
+ } else {
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ }
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ // only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4.
+ if (surf_linear) { //one 64B PTE request returns 8 PTEs
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_vmpg_width + 3;
+ log2_dpte_req_height = 0;
+ } else if (log2_blk_bytes == 12) { //4KB tile means 4kB page size
+ //one 64B req gives 8x1 PTEs for 4KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ } else if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) { // tile block >= 64KB
+ //two 64B reqs of 2x4 PTEs give 16 PTEs to cover 64KB
+ log2_dpte_req_height_ptes = 4;
+ log2_dpte_req_width = log2_blk256_width + 4; // log2_64KB_width
+ log2_dpte_req_height = log2_blk256_height + 4; // log2_64KB_height
+ } else { // 64KB page size; the tile block must be 64KB
+ //one 64B req gives 8x1 PTEs for 64KB tile
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width = log2_blk_width + 3;
+ log2_dpte_req_height = log2_blk_height + 0;
+ }
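+ // note: in the 64KB-tile / 4KB-page case two back-to-back 64B requests (2x4 ptes
+ // each) are issued, which is why dpte_bytes_per_row_ub below is scaled by 128
+ // rather than 64.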
+
+ // The dpte request dimensions in data elements is dpte_req_width x dpte_req_height
+ // log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte req represents.
+ // That depends on the pte shape (i.e. 8x1, 4x2, 2x4)
+ //log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ //log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_height = 1 << log2_dpte_req_height;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ // calculate pitch dpte row buffer can hold
+ // round the result down to a power of two.
+ pde_buf_entries = yuv420 ? (pde_proc_buffer_size_64k_reqs >> 1) : pde_proc_buffer_size_64k_reqs;
+ if (surf_linear) {
+ unsigned int dpte_row_height;
+
+ log2_dpte_row_height_linear = dml_floor(dml_log2(dml_min(64 * 1024 * pde_buf_entries
+ / bytes_per_element,
+ dpte_buf_in_pte_reqs
+ * dpte_req_width)
+ / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ // For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
+ // the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
+ dpte_row_height = 1 << log2_dpte_row_height;
+ dpte_row_width_ub = dml_round_to_multiple(data_pitch * dpte_row_height - 1,
+ dpte_req_width,
+ 1) + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ // the upper bound of the dpte_row_width without dependency on viewport position follows.
+ // for tiled mode, the row height is the same as the req height and the row stores up to the viewport-size upper bound
+ if (!surf_vert) {
+ log2_dpte_row_height = log2_dpte_req_height;
+ dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ + dpte_req_height;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
+ }
+ }
+ if (log2_blk_bytes >= 16 && log2_vmpg_bytes == 12) // tile block >= 64KB
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 128; //2*64B dpte request
+ else
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64; //64B dpte request
+
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+
+ // dpte_group_bytes is reduced for the specific case of vertical
+ // access of a tiled surface that has a dpte request shape of 8x1 ptes.
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) // reduced: in this case there will be a page fault within a group
+ rq_sizing_param->dpte_group_bytes = 512;
+ else
+ //full size
+ rq_sizing_param->dpte_group_bytes = 2048;
+
+ // since the pte request size is 64 bytes, the number of data pte requests per full-sized group is as follows.
+ log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
+ log2_dpte_group_length = log2_dpte_group_bytes - 6; //length in 64b requests
+
+ // full sized data pte group width in elements
+ if (!surf_vert)
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
+ else
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
+
+ //But if the tile block >=64KB and the page size is 4KB, then each dPTE request is 2*64B
+ if ((log2_blk_bytes >= 16) && (log2_vmpg_bytes == 12)) // tile block >= 64KB
+ log2_dpte_group_width = log2_dpte_group_width - 1;
+
+ dpte_group_width = 1 << log2_dpte_group_width;
+
+ // since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
+ // the upper bound for the dpte groups per row is as follows.
+ rq_dlg_param->dpte_groups_per_row_ub = dml_ceil((double) dpte_row_width_ub / dpte_group_width,
+ 1);
+}
+
+static void get_surf_rq_param(struct display_mode_lib *mode_lib,
+ display_data_rq_sizing_params_st *rq_sizing_param,
+ display_data_rq_dlg_params_st *rq_dlg_param,
+ display_data_rq_misc_params_st *rq_misc_param,
+ const display_pipe_source_params_st pipe_src_param,
+ bool is_chroma)
+{
+ bool mode_422 = 0;
+ unsigned int vp_width = 0;
+ unsigned int vp_height = 0;
+ unsigned int data_pitch = 0;
+ unsigned int meta_pitch = 0;
+ unsigned int ppe = mode_422 ? 2 : 1;
+
+ // FIXME check if ppe apply for both luma and chroma in 422 case
+ if (is_chroma) {
+ vp_width = pipe_src_param.viewport_width_c / ppe;
+ vp_height = pipe_src_param.viewport_height_c;
+ data_pitch = pipe_src_param.data_pitch_c;
+ meta_pitch = pipe_src_param.meta_pitch_c;
+ } else {
+ vp_width = pipe_src_param.viewport_width / ppe;
+ vp_height = pipe_src_param.viewport_height;
+ data_pitch = pipe_src_param.data_pitch;
+ meta_pitch = pipe_src_param.meta_pitch;
+ }
+
+ rq_sizing_param->chunk_bytes = 8192;
+
+ if (rq_sizing_param->chunk_bytes == 64 * 1024)
+ rq_sizing_param->min_chunk_bytes = 0;
+ else
+ rq_sizing_param->min_chunk_bytes = 1024;
+
+ rq_sizing_param->meta_chunk_bytes = 2048;
+ rq_sizing_param->min_meta_chunk_bytes = 256;
+
+ rq_sizing_param->mpte_group_bytes = 2048;
+
+ get_meta_and_pte_attr(mode_lib,
+ rq_dlg_param,
+ rq_misc_param,
+ rq_sizing_param,
+ vp_width,
+ vp_height,
+ data_pitch,
+ meta_pitch,
+ pipe_src_param.source_format,
+ pipe_src_param.sw_mode,
+ pipe_src_param.macro_tile_size,
+ pipe_src_param.source_scan,
+ is_chroma);
+}
+
+void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ // get param for luma surface
+ rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
+ || pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_l),
+ &(rq_param->dlg.rq_l),
+ &(rq_param->misc.rq_l),
+ pipe_src_param,
+ 0);
+
+ if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
+ // get param for chroma surface
+ get_surf_rq_param(mode_lib,
+ &(rq_param->sizing.rq_c),
+ &(rq_param->dlg.rq_c),
+ &(rq_param->misc.rq_c),
+ pipe_src_param,
+ 1);
+ }
+
+ // calculate how to split the det buffer space between luma and chroma
+ handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
+ print__rq_params_st(mode_lib, *rq_param);
+}
+
+void dml_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_source_params_st pipe_src_param)
+{
+ display_rq_params_st rq_param = {0};
+
+ memset(rq_regs, 0, sizeof(*rq_regs));
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_src_param);
+ extract_rq_regs(mode_lib, rq_regs, rq_param);
+
+ print__rq_regs_st(mode_lib, *rq_regs);
+}
+
+// Note: currently taken in as-is.
+// It would be nice to decouple this code from the hw register implementation and to factor out the code that is repeated for luma and chroma.
+void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src;
+ const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest;
+ const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
+ const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
+ const scaler_ratio_depth_st *scl = &e2e_pipe_param[pipe_idx].pipe.scale_ratio_depth;
+ const scaler_taps_st *taps = &e2e_pipe_param[pipe_idx].pipe.scale_taps;
+
+ // -------------------------
+ // Section 1.15.2.1: OTG dependent Params
+ // -------------------------
+ // Timing
+ unsigned int htotal = dst->htotal;
+// unsigned int hblank_start = dst.hblank_start; // TODO: Remove
+ unsigned int hblank_end = dst->hblank_end;
+ unsigned int vblank_start = dst->vblank_start;
+ unsigned int vblank_end = dst->vblank_end;
+ unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
+
+ double dppclk_freq_in_mhz = clks->dppclk_mhz;
+ double dispclk_freq_in_mhz = clks->dispclk_mhz;
+ double refclk_freq_in_mhz = clks->refclk_mhz;
+ double pclk_freq_in_mhz = dst->pixel_rate_mhz;
+ bool interlaced = dst->interlaced;
+
+ double ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
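+ // ref_freq_to_pix_freq converts a count of pixel clocks into refclk cycles;
+ // most of the refcyc_* values programmed below are derived with this factor.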
+
+ double min_dcfclk_mhz;
+ double t_calc_us;
+ double min_ttu_vblank;
+
+ double min_dst_y_ttu_vblank;
+ unsigned int dlg_vblank_start;
+ bool dual_plane;
+ bool mode_422;
+ unsigned int access_dir;
+ unsigned int vp_height_l;
+ unsigned int vp_width_l;
+ unsigned int vp_height_c;
+ unsigned int vp_width_c;
+
+ // Scaling
+ unsigned int htaps_l;
+ unsigned int htaps_c;
+ double hratio_l;
+ double hratio_c;
+ double vratio_l;
+ double vratio_c;
+ bool scl_enable;
+
+ double line_time_in_us;
+ // double vinit_l;
+ // double vinit_c;
+ // double vinit_bot_l;
+ // double vinit_bot_c;
+
+ // unsigned int swath_height_l;
+ unsigned int swath_width_ub_l;
+ // unsigned int dpte_bytes_per_row_ub_l;
+ unsigned int dpte_groups_per_row_ub_l;
+ // unsigned int meta_pte_bytes_per_frame_ub_l;
+ // unsigned int meta_bytes_per_row_ub_l;
+
+ // unsigned int swath_height_c;
+ unsigned int swath_width_ub_c;
+ // unsigned int dpte_bytes_per_row_ub_c;
+ unsigned int dpte_groups_per_row_ub_c;
+
+ unsigned int meta_chunks_per_row_ub_l;
+ unsigned int meta_chunks_per_row_ub_c;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int pixel_rate_delay_subtotal;
+
+ unsigned int vstartup_start;
+ unsigned int dst_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ double line_wait;
+ double dst_y_prefetch;
+ double dst_y_per_vm_vblank;
+ double dst_y_per_row_vblank;
+ double dst_y_per_vm_flip;
+ double dst_y_per_row_flip;
+ double min_dst_y_per_vm_vblank;
+ double min_dst_y_per_row_vblank;
+ double lsw;
+ double vratio_pre_l;
+ double vratio_pre_c;
+ unsigned int req_per_swath_ub_l;
+ unsigned int req_per_swath_ub_c;
+ unsigned int meta_row_height_l;
+ unsigned int meta_row_height_c;
+ unsigned int swath_width_pixels_ub_l;
+ unsigned int swath_width_pixels_ub_c;
+ unsigned int scaler_rec_in_width_l;
+ unsigned int scaler_rec_in_width_c;
+ unsigned int dpte_row_height_l;
+ unsigned int dpte_row_height_c;
+ double hscale_pixel_rate_l;
+ double hscale_pixel_rate_c;
+ double min_hratio_fact_l;
+ double min_hratio_fact_c;
+ double refcyc_per_line_delivery_pre_l;
+ double refcyc_per_line_delivery_pre_c;
+ double refcyc_per_line_delivery_l;
+ double refcyc_per_line_delivery_c;
+
+ double refcyc_per_req_delivery_pre_l;
+ double refcyc_per_req_delivery_pre_c;
+ double refcyc_per_req_delivery_l;
+ double refcyc_per_req_delivery_c;
+
+ unsigned int full_recout_width;
+ double xfc_transfer_delay;
+ double xfc_precharge_delay;
+ double xfc_remote_surface_flip_latency;
+ double xfc_dst_y_delta_drq_limit;
+ double xfc_prefetch_margin;
+ double refcyc_per_req_delivery_pre_cur0;
+ double refcyc_per_req_delivery_cur0;
+ double refcyc_per_req_delivery_pre_cur1;
+ double refcyc_per_req_delivery_cur1;
+
+ memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+
+ dml_print("DML_DLG: %s: cstate_en = %d\n", __func__, cstate_en);
+ dml_print("DML_DLG: %s: pstate_en = %d\n", __func__, pstate_en);
+ dml_print("DML_DLG: %s: vm_en = %d\n", __func__, vm_en);
+ dml_print("DML_DLG: %s: ignore_viewport_pos = %d\n", __func__, ignore_viewport_pos);
+ dml_print("DML_DLG: %s: immediate_flip_support = %d\n", __func__, immediate_flip_support);
+
+ dml_print("DML_DLG: %s: dppclk_freq_in_mhz = %3.2f\n", __func__, dppclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: dispclk_freq_in_mhz = %3.2f\n", __func__, dispclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: refclk_freq_in_mhz = %3.2f\n", __func__, refclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: pclk_freq_in_mhz = %3.2f\n", __func__, pclk_freq_in_mhz);
+ dml_print("DML_DLG: %s: interlaced = %d\n", __func__, interlaced);
+ ASSERT(ref_freq_to_pix_freq < 4.0);
+
+ disp_dlg_regs->ref_freq_to_pix_freq =
+ (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
+ disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
+ * dml_pow(2, 8));
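+ // the dml_pow(2, N) scalings convert the real-valued results into the registers'
+ // fixed-point formats (N fractional bits, e.g. 19 for ref_freq_to_pix_freq and
+ // 8 for refcyc_per_htotal); the ASSERTs bound the results to the field widths.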
+ disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; // 15 bits
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
+ * (double) ref_freq_to_pix_freq);
+ ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
+
+ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ set_prefetch_mode(mode_lib, cstate_en, pstate_en, ignore_viewport_pos, immediate_flip_support);
+ t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
+ min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
+ dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
+ + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
+
+ dml_print("DML_DLG: %s: min_dcfclk_mhz = %3.2f\n",
+ __func__,
+ min_dcfclk_mhz);
+ dml_print("DML_DLG: %s: min_ttu_vblank = %3.2f\n",
+ __func__,
+ min_ttu_vblank);
+ dml_print("DML_DLG: %s: min_dst_y_ttu_vblank = %3.2f\n",
+ __func__,
+ min_dst_y_ttu_vblank);
+ dml_print("DML_DLG: %s: t_calc_us = %3.2f\n",
+ __func__,
+ t_calc_us);
+ dml_print("DML_DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x\n",
+ __func__,
+ disp_dlg_regs->min_dst_y_next_start);
+ dml_print("DML_DLG: %s: ref_freq_to_pix_freq = %3.2f\n",
+ __func__,
+ ref_freq_to_pix_freq);
+
+ // -------------------------
+ // Section 1.15.2.2: Prefetch, Active and TTU
+ // -------------------------
+ // Prefetch Calc
+ // Source
+// dcc_en = src.dcc;
+ dual_plane = is_dual_plane((enum source_format_class)(src->source_format));
+ mode_422 = 0; // FIXME
+ access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical
+// bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0);
+// bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
+ vp_height_l = src->viewport_height;
+ vp_width_l = src->viewport_width;
+ vp_height_c = src->viewport_height_c;
+ vp_width_c = src->viewport_width_c;
+
+ // Scaling
+ htaps_l = taps->htaps;
+ htaps_c = taps->htaps_c;
+ hratio_l = scl->hscl_ratio;
+ hratio_c = scl->hscl_ratio_c;
+ vratio_l = scl->vscl_ratio;
+ vratio_c = scl->vscl_ratio_c;
+ scl_enable = scl->scl_enable;
+
+ line_time_in_us = (htotal / pclk_freq_in_mhz);
+// vinit_l = scl.vinit;
+// vinit_c = scl.vinit_c;
+// vinit_bot_l = scl.vinit_bot;
+// vinit_bot_c = scl.vinit_bot_c;
+
+// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
+// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
+// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
+// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+
+// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
+ // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ vupdate_offset = dst->vupdate_offset;
+ vupdate_width = dst->vupdate_width;
+ vready_offset = dst->vready_offset;
+
+ dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
+ dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
+
+ if (scl_enable)
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl;
+ else
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_scl_lb_only;
+
+ dppclk_delay_subtotal += mode_lib->ip.dppclk_delay_cnvc_formatter
+ + src->num_cursors * mode_lib->ip.dppclk_delay_cnvc_cursor;
+
+ if (dout->dsc_enable) {
+ double dsc_delay = get_dsc_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dispclk_delay_subtotal += dsc_delay;
+ }
+
+ pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
+
+ vstartup_start = dst->vstartup_start;
+ if (interlaced) {
+ if (vstartup_start / 2.0
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end / 2.0)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ } else {
+ if (vstartup_start
+ - (double) (vready_offset + vupdate_width + vupdate_offset) / htotal
+ <= vblank_end)
+ disp_dlg_regs->vready_after_vcount0 = 1;
+ else
+ disp_dlg_regs->vready_after_vcount0 = 0;
+ }
+
+ // TODO: Where is this coming from?
+ if (interlaced)
+ vstartup_start = vstartup_start / 2;
+
+ // TODO: What if this min_vblank doesn't match the value in the dml_config_settings.cpp?
+ if (vstartup_start >= min_vblank) {
+ dml_print("WARNING: DML_DLG: %s: vblank_start=%d vblank_end=%d\n",
+ __func__,
+ vblank_start,
+ vblank_end);
+ dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ min_vblank = vstartup_start + 1;
+ dml_print("WARNING: DML_DLG: %s: vstartup_start=%d should be less than min_vblank=%d\n",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ }
+
+ dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: htotal = %d\n", __func__, htotal);
+ dml_print("DML_DLG: %s: pixel_rate_delay_subtotal = %d\n",
+ __func__,
+ pixel_rate_delay_subtotal);
+ dml_print("DML_DLG: %s: dst_x_after_scaler = %d\n",
+ __func__,
+ dst_x_after_scaler);
+ dml_print("DML_DLG: %s: dst_y_after_scaler = %d\n",
+ __func__,
+ dst_y_after_scaler);
+
+ // Lwait
+ line_wait = mode_lib->soc.urgent_latency_us;
+ if (cstate_en)
+ line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
+ if (pstate_en)
+ line_wait = dml_max(mode_lib->soc.dram_clock_change_latency_us
+ + mode_lib->soc.urgent_latency_us,
+ line_wait);
+ line_wait = line_wait / line_time_in_us;
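+ // line_wait is the largest latency that must be tolerated: urgent latency,
+ // stutter enter+exit time (if cstate is allowed) and dram clock change plus
+ // urgent latency (if pstate is allowed), converted from microseconds to lines.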
+
+ dst_y_prefetch = get_dst_y_prefetch(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dml_print("DML_DLG: %s: dst_y_prefetch (after rnd) = %3.2f\n", __func__, dst_y_prefetch);
+
+ dst_y_per_vm_vblank = get_dst_y_per_vm_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_row_vblank = get_dst_y_per_row_vblank(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ dst_y_per_vm_flip = get_dst_y_per_vm_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ dst_y_per_row_flip = get_dst_y_per_row_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ min_dst_y_per_vm_vblank = 8.0;
+ min_dst_y_per_row_vblank = 16.0;
+
+ // magic!
+ if (htotal <= 75) {
+ min_vblank = 300;
+ min_dst_y_per_vm_vblank = 100.0;
+ min_dst_y_per_row_vblank = 100.0;
+ }
+
+ dml_print("DML_DLG: %s: dst_y_per_vm_vblank = %3.2f\n", __func__, dst_y_per_vm_vblank);
+ dml_print("DML_DLG: %s: dst_y_per_row_vblank = %3.2f\n", __func__, dst_y_per_row_vblank);
+
+ ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
+ ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
+
+ ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
+ lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
+
+ dml_print("DML_DLG: %s: lsw = %3.2f\n", __func__, lsw);
+
+ vratio_pre_l = get_vratio_prefetch_l(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ vratio_pre_c = get_vratio_prefetch_c(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+
+ dml_print("DML_DLG: %s: vratio_pre_l=%3.2f\n", __func__, vratio_pre_l);
+ dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
+
+ // Active
+ req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ swath_width_pixels_ub_l = 0;
+ swath_width_pixels_ub_c = 0;
+ scaler_rec_in_width_l = 0;
+ scaler_rec_in_width_c = 0;
+ dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+
+ if (mode_422) {
+ swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
+ swath_width_pixels_ub_c = swath_width_ub_c * 2;
+ } else {
+ swath_width_pixels_ub_l = swath_width_ub_l * 1;
+ swath_width_pixels_ub_c = swath_width_ub_c * 1;
+ }
+
+ hscale_pixel_rate_l = 0.;
+ hscale_pixel_rate_c = 0.;
+ min_hratio_fact_l = 1.0;
+ min_hratio_fact_c = 1.0;
+
+ if (htaps_l <= 1)
+ min_hratio_fact_l = 2.0;
+ else if (htaps_l <= 6) {
+ if ((hratio_l * 2.0) > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l * 2.0;
+ } else {
+ if (hratio_l > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratio_l;
+ }
+
+ hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
+
+ if (htaps_c <= 1)
+ min_hratio_fact_c = 2.0;
+ else if (htaps_c <= 6) {
+ if ((hratio_c * 2.0) > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c * 2.0;
+ } else {
+ if (hratio_c > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratio_c;
+ }
+
+ hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
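+ // min_hratio_fact_* is 2 for at most one tap, min(2 * hratio, 4) for up to six
+ // taps and min(hratio, 4) otherwise; hscale_pixel_rate_* = factor * dppclk is
+ // presumably the scaler's maximum pixel rate, used in the delivery calcs below.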
+
+ refcyc_per_line_delivery_pre_l = 0.;
+ refcyc_per_line_delivery_pre_c = 0.;
+ refcyc_per_line_delivery_l = 0.;
+ refcyc_per_line_delivery_c = 0.;
+
+ refcyc_per_req_delivery_pre_l = 0.;
+ refcyc_per_req_delivery_pre_c = 0.;
+ refcyc_per_req_delivery_l = 0.;
+ refcyc_per_req_delivery_c = 0.;
+
+ full_recout_width = 0;
+ // In ODM
+ if (src->is_hsplit) {
+ // This "hack" is only allowed (and valid) for MPC combine. In ODM
+ // combine, you MUST specify the full_recout_width...according to Oswin
+ if (dst->full_recout_width == 0 && !dst->odm_combine) {
+ dml_print("DML_DLG: %s: Warning: full_recout_width not set in hsplit mode\n",
+ __func__);
+ full_recout_width = dst->recout_width * 2; // assume half split for dcn1
+ } else
+ full_recout_width = dst->full_recout_width;
+ } else
+ full_recout_width = dst->recout_width;
+
+ // mpc_combine and odm_combine are mutually exclusive
+ refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ refcyc_per_line_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: full_recout_width = %d\n",
+ __func__,
+ full_recout_width);
+ dml_print("DML_DLG: %s: hscale_pixel_rate_l = %3.2f\n",
+ __func__,
+ hscale_pixel_rate_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_l);
+
+ if (dual_plane) {
+ refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ refcyc_per_line_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); // per line
+
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_line_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_line_delivery_c);
+ }
+
+ // TTU - Luma / Chroma
+ if (access_dir) { // vertical access
+ scaler_rec_in_width_l = vp_height_l;
+ scaler_rec_in_width_c = vp_height_c;
+ } else {
+ scaler_rec_in_width_l = vp_width_l;
+ scaler_rec_in_width_c = vp_width_c;
+ }
+
+ refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+ refcyc_per_req_delivery_l = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_l);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_l = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_l);
+
+ ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+ refcyc_per_req_delivery_c = get_refcyc_per_delivery(mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ dst->odm_combine,
+ full_recout_width,
+ dst->hactive,
+ vratio_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); // per req
+
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_pre_c);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_c = %3.2f\n",
+ __func__,
+ refcyc_per_req_delivery_c);
+
+ ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
+ }
+
+ // XFC
+ xfc_transfer_delay = get_xfc_transfer_delay(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
+ xfc_precharge_delay = get_xfc_precharge_delay(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_remote_surface_flip_latency = get_xfc_remote_surface_flip_latency(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+ xfc_dst_y_delta_drq_limit = xfc_remote_surface_flip_latency;
+ xfc_prefetch_margin = get_xfc_prefetch_margin(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx);
+
+ // TTU - Cursor
+ refcyc_per_req_delivery_pre_cur0 = 0.0;
+ refcyc_per_req_delivery_cur0 = 0.0;
+ if (src->num_cursors > 0) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur0,
+ &refcyc_per_req_delivery_cur0,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur0_src_width,
+ (enum cursor_bpp)(src->cur0_bpp));
+ }
+
+ refcyc_per_req_delivery_pre_cur1 = 0.0;
+ refcyc_per_req_delivery_cur1 = 0.0;
+ if (src->num_cursors > 1) {
+ calculate_ttu_cursor(mode_lib,
+ &refcyc_per_req_delivery_pre_cur1,
+ &refcyc_per_req_delivery_cur1,
+ refclk_freq_in_mhz,
+ ref_freq_to_pix_freq,
+ hscale_pixel_rate_l,
+ scl->hscl_ratio,
+ vratio_pre_l,
+ vratio_l,
+ src->cur1_src_width,
+ (enum cursor_bpp)(src->cur1_bpp));
+ }
+
+ // TTU - Misc
+ // all hard-coded
+
+ // Assignment to register structures
+ disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; // in terms of line
+ disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; // in terms of refclk
+ ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
+ disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_vm_flip = (unsigned int) (dst_y_per_vm_flip * dml_pow(2, 2));
+ disp_dlg_regs->dst_y_per_row_flip = (unsigned int) (dst_y_per_row_flip * dml_pow(2, 2));
+
+ disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+ < (unsigned int) dml_pow(2, 13));
+ }
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l; // dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_l;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_l = (unsigned int) (dst_y_per_row_flip * htotal
+ * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_l;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / dpte_groups_per_row_ub_c;
+ disp_dlg_regs->refcyc_per_meta_chunk_flip_c = (unsigned int) (dst_y_per_row_flip
+ * htotal * ref_freq_to_pix_freq) / meta_chunks_per_row_ub_c;
+ }
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ if (dual_plane) {
+ disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * dml_pow(2, 2));
+ if (disp_dlg_regs->dst_y_per_pte_row_nom_c >= (unsigned int) dml_pow(2, 17)) {
+ dml_print("DML_DLG: %s: Warning dst_y_per_pte_row_nom_c %u larger than supported by register format U15.2 %u\n",
+ __func__,
+ disp_dlg_regs->dst_y_per_pte_row_nom_c,
+ (unsigned int) dml_pow(2, 17) - 1);
+ }
+ }
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; // TODO: dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
+
+ if (dual_plane) {
+ disp_dlg_regs->refcyc_per_pte_group_nom_c =
+ (unsigned int) ((double) dpte_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
+
+ // TODO: Is this the right calculation? Does htotal need to be halved?
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c =
+ (unsigned int) ((double) meta_row_height_c / (double) vratio_c
+ * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_c = dml_pow(2, 23) - 1;
+ }
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_l,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(refcyc_per_line_delivery_l,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(refcyc_per_line_delivery_pre_c,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(refcyc_per_line_delivery_c,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
+ disp_dlg_regs->dst_y_offset_cur0 = 0;
+ disp_dlg_regs->chunk_hdl_adjust_cur1 = 3;
+ disp_dlg_regs->dst_y_offset_cur1 = 0;
+
+ disp_dlg_regs->xfc_reg_transfer_delay = xfc_transfer_delay;
+ disp_dlg_regs->xfc_reg_precharge_delay = xfc_precharge_delay;
+ disp_dlg_regs->xfc_reg_remote_surface_flip_latency = xfc_remote_surface_flip_latency;
+ disp_dlg_regs->xfc_reg_prefetch_margin = dml_ceil(xfc_prefetch_margin * refclk_freq_in_mhz,
+ 1);
+
+ // slave has to have this value also set to off
+ if (src->xfc_enable && !src->xfc_slave)
+ disp_dlg_regs->dst_y_delta_drq_limit = dml_ceil(xfc_dst_y_delta_drq_limit, 1);
+ else
+ disp_dlg_regs->dst_y_delta_drq_limit = 0x7fff; // off
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_c = (unsigned int) (refcyc_per_req_delivery_pre_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 = (unsigned int) (refcyc_per_req_delivery_cur0
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur1 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur1 * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_cur1 = (unsigned int) (refcyc_per_req_delivery_cur1
+ * dml_pow(2, 10));
+ disp_ttu_regs->qos_level_low_wm = 0;
+ ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
+ disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
+ * ref_freq_to_pix_freq);
+ ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
+
+ disp_ttu_regs->qos_level_flip = 14;
+ disp_ttu_regs->qos_level_fixed_l = 8;
+ disp_ttu_regs->qos_level_fixed_c = 8;
+ disp_ttu_regs->qos_level_fixed_cur0 = 8;
+ disp_ttu_regs->qos_ramp_disable_l = 0;
+ disp_ttu_regs->qos_ramp_disable_c = 0;
+ disp_ttu_regs->qos_ramp_disable_cur0 = 0;
+
+ disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
+ ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
+
+ print__ttu_regs_st(mode_lib, *disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+}
+
+void dml_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support)
+{
+ display_rq_params_st rq_param = {0};
+ display_dlg_sys_params_st dlg_sys_param = {0};
+
+ // Get the watermarks and Tex (urgent extra latency).
+ dlg_sys_param.t_urg_wm_us = get_wm_urgent(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.deepsleep_dcfclk_mhz = get_clk_dcf_deepsleep(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_extra_us = get_urgent_extra_latency(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.mem_trip_us = get_wm_memory_trip(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_mclk_wm_us = get_wm_dram_clock_change(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.t_sr_wm_us = get_wm_stutter_enter_exit(mode_lib, e2e_pipe_param, num_pipes);
+ dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
+ e2e_pipe_param,
+ num_pipes);
+ dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
+ / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
+
+ print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+
+ // system parameter calculation done
+
+ dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
+ dml_rq_dlg_get_dlg_params(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_idx,
+ dlg_regs,
+ ttu_regs,
+ rq_param.dlg,
+ dlg_sys_param,
+ cstate_en,
+ pstate_en,
+ vm_en,
+ ignore_viewport_pos,
+ immediate_flip_support);
+ dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
+}
+
+void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param)
+{
+ memset(arb_param, 0, sizeof(*arb_param));
+ arb_param->max_req_outstanding = 256;
+ arb_param->min_req_outstanding = 68;
+ arb_param->sat_level_us = 60;
+}
+
+void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+ double *refcyc_per_req_delivery_pre_cur,
+ double *refcyc_per_req_delivery_cur,
+ double refclk_freq_in_mhz,
+ double ref_freq_to_pix_freq,
+ double hscale_pixel_rate_l,
+ double hscl_ratio,
+ double vratio_pre_l,
+ double vratio_l,
+ unsigned int cur_width,
+ enum cursor_bpp cur_bpp)
+{
+ unsigned int cur_src_width = cur_width;
+ unsigned int cur_req_size = 0;
+ unsigned int cur_req_width = 0;
+ double cur_width_ub = 0.0;
+ double cur_req_per_width = 0.0;
+ double hactive_cur = 0.0;
+
+ ASSERT(cur_src_width <= 256);
+
+ *refcyc_per_req_delivery_pre_cur = 0.0;
+ *refcyc_per_req_delivery_cur = 0.0;
+ if (cur_src_width > 0) {
+ unsigned int cur_bit_per_pixel = 0;
+
+ if (cur_bpp == dm_cur_2bit) {
+ cur_req_size = 64; // byte
+ cur_bit_per_pixel = 2;
+ } else { // 32bit
+ cur_bit_per_pixel = 32;
+ if (cur_src_width >= 1 && cur_src_width <= 16)
+ cur_req_size = 64;
+ else if (cur_src_width >= 17 && cur_src_width <= 31)
+ cur_req_size = 128;
+ else
+ cur_req_size = 256;
+ }
+
+ cur_req_width = (double) cur_req_size / ((double) cur_bit_per_pixel / 8.0);
+ cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1)
+ * (double) cur_req_width;
+ cur_req_per_width = cur_width_ub / (double) cur_req_width;
+ hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor
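+ // e.g. a hypothetical 64-pixel-wide 32bpp cursor: cur_req_size = 256B,
+ // cur_req_width = 256 / 4 = 64 pixels, cur_width_ub = 64 and
+ // cur_req_per_width = 1, i.e. one request covers the whole cursor line.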
+
+ if (vratio_pre_l <= 1.0) {
+ *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_pre_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ ASSERT(*refcyc_per_req_delivery_pre_cur < dml_pow(2, 13));
+
+ if (vratio_l <= 1.0) {
+ *refcyc_per_req_delivery_cur = hactive_cur * ref_freq_to_pix_freq
+ / (double) cur_req_per_width;
+ } else {
+ *refcyc_per_req_delivery_cur = (double) refclk_freq_in_mhz
+ * (double) cur_src_width / hscale_pixel_rate_l
+ / (double) cur_req_per_width;
+ }
+
+ dml_print("DML_DLG: %s: cur_req_width = %d\n",
+ __func__,
+ cur_req_width);
+ dml_print("DML_DLG: %s: cur_width_ub = %3.2f\n",
+ __func__,
+ cur_width_ub);
+ dml_print("DML_DLG: %s: cur_req_per_width = %3.2f\n",
+ __func__,
+ cur_req_per_width);
+ dml_print("DML_DLG: %s: hactive_cur = %3.2f\n",
+ __func__,
+ hactive_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_pre_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_pre_cur);
+ dml_print("DML_DLG: %s: refcyc_per_req_delivery_cur = %3.2f\n",
+ __func__,
+ *refcyc_per_req_delivery_cur);
+
+ ASSERT(*refcyc_per_req_delivery_cur < dml_pow(2, 13));
+ }
+}
+
+unsigned int dml_rq_dlg_get_calculated_vstartup(struct display_mode_lib *mode_lib,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx)
+{
+ unsigned int vstartup_pipe[DC__NUM_PIPES__MAX];
+ bool visited[DC__NUM_PIPES__MAX];
+ unsigned int pipe_inst = 0;
+ unsigned int i, j, k;
+
+ for (k = 0; k < num_pipes; ++k)
+ visited[k] = false;
+
+ for (i = 0; i < num_pipes; i++) {
+ if (e2e_pipe_param[i].pipe.src.is_hsplit && !visited[i]) {
+ unsigned int grp = e2e_pipe_param[i].pipe.src.hsplit_grp;
+
+ for (j = i; j < num_pipes; j++) {
+ if (e2e_pipe_param[j].pipe.src.hsplit_grp == grp
+ && e2e_pipe_param[j].pipe.src.is_hsplit
+ && !visited[j]) {
+ vstartup_pipe[j] = get_vstartup_calculated(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_inst);
+ visited[j] = true;
+ }
+ }
+
+ pipe_inst++;
+ }
+
+ if (!visited[i]) {
+ vstartup_pipe[i] = get_vstartup_calculated(mode_lib,
+ e2e_pipe_param,
+ num_pipes,
+ pipe_inst);
+ visited[i] = true;
+ pipe_inst++;
+ }
+ }
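+ // pipes sharing the same hsplit group are treated as one logical pipe instance
+ // and all receive the vstartup computed for that instance; unsplit pipes each
+ // consume their own instance, in order.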
+
+ return vstartup_pipe[pipe_idx];
+}
+
+void dml_rq_dlg_get_row_heights(struct display_mode_lib *mode_lib,
+ unsigned int *o_dpte_row_height,
+ unsigned int *o_meta_row_height,
+ unsigned int vp_width,
+ unsigned int data_pitch,
+ int source_format,
+ int tiling,
+ int macro_tile_size,
+ int source_scan,
+ int is_chroma)
+{
+ display_data_rq_dlg_params_st rq_dlg_param;
+ display_data_rq_misc_params_st rq_misc_param;
+ display_data_rq_sizing_params_st rq_sizing_param;
+
+ get_meta_and_pte_attr(mode_lib,
+ &rq_dlg_param,
+ &rq_misc_param,
+ &rq_sizing_param,
+ vp_width,
+ 0, // dummy
+ data_pitch,
+ 0, // dummy
+ source_format,
+ tiling,
+ macro_tile_size,
+ source_scan,
+ is_chroma);
+
+ *o_dpte_row_height = rq_dlg_param.dpte_row_height;
+ *o_meta_row_height = rq_dlg_param.meta_row_height;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
new file mode 100644
index 000000000000..efdd4c73d8f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_calc.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML2_DISPLAY_RQ_DLG_CALC_H__
+#define __DML2_DISPLAY_RQ_DLG_CALC_H__
+
+#include "dml_common_defs.h"
+#include "display_rq_dlg_helpers.h"
+
+struct display_mode_lib;
+
+// Function: dml_rq_dlg_get_rq_params
+// Calculate requestor-related parameters that are register-definition agnostic
+// (i.e. this layer tries to separate real values from the register definition)
+// Input:
+// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+// Output:
+// rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.)
+//
+void dml_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st pipe_src_param);
+
+// Function: dml_rq_dlg_get_rq_reg
+// Main entry point for the test to get the register values out of this DML class.
+// This function calls <get_rq_param> and <extract_rq_regs> to calculate
+// and then populate the rq_regs struct
+// Input:
+// pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+// Output:
+// rq_regs - struct that holds all the RQ register field values.
+// See also: <display_rq_regs_st>
+void dml_rq_dlg_get_rq_reg(
+ struct display_mode_lib *mode_lib,
+ display_rq_regs_st *rq_regs,
+ const display_pipe_source_params_st pipe_src_param);
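+// A minimal (hypothetical) usage sketch; the mode_lib and pipe_params names below
+// are placeholders for whatever the caller has set up:
+//   display_rq_regs_st rq_regs;
+//   dml_rq_dlg_get_rq_reg(&mode_lib, &rq_regs, pipe_params.pipe.src);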
+
+// Function: dml_rq_dlg_get_dlg_params
+// Calculate deadline related parameters
+//
+void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ display_dlg_regs_st *disp_dlg_regs,
+ display_ttu_regs_st *disp_ttu_regs,
+ const display_rq_dlg_params_st rq_dlg_param,
+ const display_dlg_sys_params_st dlg_sys_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support);
+
+// Function: dml_rq_dlg_get_dlg_params_prefetch
+// For the flip_bw programming guide change, dml now needs to calculate the flip_bytes and prefetch_bw
+// for ALL pipes and use this info to calculate the prefetch programming.
+// Output: prefetch_param.prefetch_bw and flip_bytes
+void dml_rq_dlg_get_dlg_params_prefetch(
+ struct display_mode_lib *mode_lib,
+ display_dlg_prefetch_param_st *prefetch_param,
+ display_rq_dlg_params_st rq_dlg_param,
+ display_dlg_sys_params_st dlg_sys_param,
+ display_e2e_pipe_params_st e2e_pipe_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en);
+
+// Function: dml_rq_dlg_get_dlg_reg
+// Calculate and return DLG and TTU register struct given the system setting
+// Output:
+// dlg_regs - output DLG register struct
+// ttu_regs - output DLG TTU register struct
+// Input:
+// e2e_pipe_param - "compacted" array of e2e pipe param struct
+// num_pipes - num of active "pipe" or "route"
+// pipe_idx - index that identifies the e2e_pipe_param that corresponds to this dlg
+// cstate - 0: assume cstate is not required when calculating min_ttu_vblank (added for legacy or unrealistic timing tests).
+// 1: normal mode, cstate is considered.
+void dml_rq_dlg_get_dlg_reg(
+ struct display_mode_lib *mode_lib,
+ display_dlg_regs_st *dlg_regs,
+ display_ttu_regs_st *ttu_regs,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool ignore_viewport_pos,
+ const bool immediate_flip_support);
+
+// Function: dml_rq_dlg_get_calculated_vstartup
+// Calculate and return vstartup
+// Output:
+// unsigned int vstartup
+// Input:
+// e2e_pipe_param - "compacted" array of e2e pipe param struct
+// num_pipes - num of active "pipe" or "route"
+// pipe_idx - index that identifies the e2e_pipe_param that corresponds to this dlg
+// NOTE: this MUST be called after setting the prefetch mode!
+unsigned int dml_rq_dlg_get_calculated_vstartup(
+ struct display_mode_lib *mode_lib,
+ display_e2e_pipe_params_st *e2e_pipe_param,
+ const unsigned int num_pipes,
+ const unsigned int pipe_idx);
+
+// Function: dml_rq_dlg_get_row_heights
+// Calculate dpte and meta row heights
+void dml_rq_dlg_get_row_heights(
+ struct display_mode_lib *mode_lib,
+ unsigned int *o_dpte_row_height,
+ unsigned int *o_meta_row_height,
+ unsigned int vp_width,
+ unsigned int data_pitch,
+ int source_format,
+ int tiling,
+ int macro_tile_size,
+ int source_scan,
+ int is_chroma);
+
+// Function: dml_rq_dlg_get_arb_params
+void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param);
+
+#endif
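A rough usage sketch (illustrative only, not part of this patch) of how a caller might drive the entry points declared above, assuming mode_lib, the compacted pipes[] array and the prefetch mode have already been set up; the pipes[pipe_idx].pipe.src field path and the flag values are assumptions made for the example:

static void example_program_pipe(struct display_mode_lib *mode_lib,
		display_e2e_pipe_params_st *pipes,
		unsigned int num_pipes,
		unsigned int pipe_idx)
{
	display_rq_regs_st rq_regs;
	display_dlg_regs_st dlg_regs;
	display_ttu_regs_st ttu_regs;
	unsigned int vstartup;

	/* RQ registers depend only on the pipe source configuration. */
	dml_rq_dlg_get_rq_reg(mode_lib, &rq_regs, pipes[pipe_idx].pipe.src);

	/* DLG/TTU registers need the whole compacted e2e pipe array. */
	dml_rq_dlg_get_dlg_reg(mode_lib, &dlg_regs, &ttu_regs, pipes,
			num_pipes, pipe_idx,
			true,	/* cstate_en */
			true,	/* pstate_en */
			true,	/* vm_en */
			false,	/* ignore_viewport_pos */
			false);	/* immediate_flip_support */

	/* Per the note above, only meaningful after the prefetch mode is set. */
	vstartup = dml_rq_dlg_get_calculated_vstartup(mode_lib, pipes,
			num_pipes, pipe_idx);
	(void)vstartup;	/* the caller would go on to program this value */
}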
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
new file mode 100644
index 000000000000..189052e911fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "display_rq_dlg_helpers.h"
+
+void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
+{
+ dml_print("DML_RQ_DLG_CALC: ***************************\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA> ===\n");
+ print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_c);
+
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_c);
+
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_c);
+ dml_print("DML_RQ_DLG_CALC: ***************************\n");
+}
+
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing.chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing.min_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing.meta_chunk_bytes);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_meta_chunk_bytes = %0d\n",
+ rq_sizing.min_meta_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing.mpte_group_bytes);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing.dpte_group_bytes);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: swath_width_ub = %0d\n",
+ rq_dlg_param.swath_width_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: swath_height = %0d\n",
+ rq_dlg_param.swath_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: req_per_swath_ub = %0d\n",
+ rq_dlg_param.req_per_swath_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_pte_bytes_per_frame_ub = %0d\n",
+ rq_dlg_param.meta_pte_bytes_per_frame_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_req_per_row_ub = %0d\n",
+ rq_dlg_param.dpte_req_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_groups_per_row_ub = %0d\n",
+ rq_dlg_param.dpte_groups_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_row_height = %0d\n",
+ rq_dlg_param.dpte_row_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: dpte_bytes_per_row_ub = %0d\n",
+ rq_dlg_param.dpte_bytes_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_chunks_per_row_ub = %0d\n",
+ rq_dlg_param.meta_chunks_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_req_per_row_ub = %0d\n",
+ rq_dlg_param.meta_req_per_row_ub);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_row_height = %0d\n",
+ rq_dlg_param.meta_row_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: meta_bytes_per_row_ub = %0d\n",
+ rq_dlg_param.meta_bytes_per_row_ub);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: full_swath_bytes = %0d\n",
+ rq_misc_param.full_swath_bytes);
+ dml_print(
+ "DML_RQ_DLG_CALC: stored_swath_bytes = %0d\n",
+ rq_misc_param.stored_swath_bytes);
+ dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param.blk256_width);
+ dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param.blk256_height);
+ dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param.req_width);
+ dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param.req_height);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_c);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
+ dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param.t_mclk_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param.t_urg_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param.t_sr_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param.t_extra_us);
+ dml_print(
+ "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n",
+ dlg_sys_param.t_srx_delay_us);
+ dml_print(
+ "DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
+ dlg_sys_param.deepsleep_dcfclk_mhz);
+ dml_print(
+ "DML_RQ_DLG_CALC: total_flip_bw = %3.2f\n",
+ dlg_sys_param.total_flip_bw);
+ dml_print(
+ "DML_RQ_DLG_CALC: total_flip_bytes = %i\n",
+ dlg_sys_param.total_flip_bytes);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st rq_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n");
+ dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs.chunk_size);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs.min_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs.meta_chunk_size);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_meta_chunk_size = 0x%0x\n",
+ rq_regs.min_meta_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs.dpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs.mpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs.swath_height);
+ dml_print(
+ "DML_RQ_DLG_CALC: pte_row_height_linear = 0x%0x\n",
+ rq_regs.pte_row_height_linear);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_REGS_ST\n");
+ dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
+ print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_l);
+ dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
+ print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_c);
+ dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs.drq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs.prq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs.mrq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs.crq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs.plane1_base_address);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_h_blank_end = 0x%0x\n",
+ dlg_regs.refcyc_h_blank_end);
+ dml_print(
+ "DML_RQ_DLG_CALC: dlg_vblank_end = 0x%0x\n",
+ dlg_regs.dlg_vblank_end);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_dst_y_next_start = 0x%0x\n",
+ dlg_regs.min_dst_y_next_start);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_htotal = 0x%0x\n",
+ dlg_regs.refcyc_per_htotal);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_x_after_scaler = 0x%0x\n",
+ dlg_regs.refcyc_x_after_scaler);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_after_scaler = 0x%0x\n",
+ dlg_regs.dst_y_after_scaler);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_prefetch = 0x%0x\n",
+ dlg_regs.dst_y_prefetch);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_vm_vblank = 0x%0x\n",
+ dlg_regs.dst_y_per_vm_vblank);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_row_vblank = 0x%0x\n",
+ dlg_regs.dst_y_per_row_vblank);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_vm_flip = 0x%0x\n",
+ dlg_regs.dst_y_per_vm_flip);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_row_flip = 0x%0x\n",
+ dlg_regs.dst_y_per_row_flip);
+ dml_print(
+ "DML_RQ_DLG_CALC: ref_freq_to_pix_freq = 0x%0x\n",
+ dlg_regs.ref_freq_to_pix_freq);
+ dml_print(
+ "DML_RQ_DLG_CALC: vratio_prefetch = 0x%0x\n",
+ dlg_regs.vratio_prefetch);
+ dml_print(
+ "DML_RQ_DLG_CALC: vratio_prefetch_c = 0x%0x\n",
+ dlg_regs.vratio_prefetch_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_l = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_vblank_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_c = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_vblank_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_l = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_vblank_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_c = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_vblank_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_l = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_flip_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_c = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_flip_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_l = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_flip_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_c = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_flip_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_l = 0x%0x\n",
+ dlg_regs.dst_y_per_pte_row_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_c = 0x%0x\n",
+ dlg_regs.dst_y_per_pte_row_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_l = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_c = 0x%0x\n",
+ dlg_regs.refcyc_per_pte_group_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_l = 0x%0x\n",
+ dlg_regs.dst_y_per_meta_row_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_c = 0x%0x\n",
+ dlg_regs.dst_y_per_meta_row_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_l = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_nom_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_c = 0x%0x\n",
+ dlg_regs.refcyc_per_meta_chunk_nom_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_l = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_pre_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_c = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_pre_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_l = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_line_delivery_c = 0x%0x\n",
+ dlg_regs.refcyc_per_line_delivery_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur0 = 0x%0x\n",
+ dlg_regs.chunk_hdl_adjust_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_offset_cur1 = 0x%0x\n",
+ dlg_regs.dst_y_offset_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: chunk_hdl_adjust_cur1 = 0x%0x\n",
+ dlg_regs.chunk_hdl_adjust_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: vready_after_vcount0 = 0x%0x\n",
+ dlg_regs.vready_after_vcount0);
+ dml_print(
+ "DML_RQ_DLG_CALC: dst_y_delta_drq_limit = 0x%0x\n",
+ dlg_regs.dst_y_delta_drq_limit);
+ dml_print(
+ "DML_RQ_DLG_CALC: xfc_reg_transfer_delay = 0x%0x\n",
+ dlg_regs.xfc_reg_transfer_delay);
+ dml_print(
+ "DML_RQ_DLG_CALC: xfc_reg_precharge_delay = 0x%0x\n",
+ dlg_regs.xfc_reg_precharge_delay);
+ dml_print(
+ "DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
+ dlg_regs.xfc_reg_remote_surface_flip_latency);
+
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
+
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs)
+{
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+ dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n");
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_low_wm = 0x%0x\n",
+ ttu_regs.qos_level_low_wm);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_high_wm = 0x%0x\n",
+ ttu_regs.qos_level_high_wm);
+ dml_print(
+ "DML_RQ_DLG_CALC: min_ttu_vblank = 0x%0x\n",
+ ttu_regs.min_ttu_vblank);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_flip = 0x%0x\n",
+ ttu_regs.qos_level_flip);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_l = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_l = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_c = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_c = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur0 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur0 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur1 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur1 = 0x%0x\n",
+ ttu_regs.refcyc_per_req_delivery_pre_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_l = 0x%0x\n",
+ ttu_regs.qos_level_fixed_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_l = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_l);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_c = 0x%0x\n",
+ ttu_regs.qos_level_fixed_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_c = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_c);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_cur0 = 0x%0x\n",
+ ttu_regs.qos_level_fixed_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_cur0 = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_cur0);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_level_fixed_cur1 = 0x%0x\n",
+ ttu_regs.qos_level_fixed_cur1);
+ dml_print(
+ "DML_RQ_DLG_CALC: qos_ramp_disable_cur1 = 0x%0x\n",
+ ttu_regs.qos_ramp_disable_cur1);
+ dml_print("DML_RQ_DLG_CALC: =====================================\n");
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
new file mode 100644
index 000000000000..1f24db830737
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DISPLAY_RQ_DLG_HELPERS_H__
+#define __DISPLAY_RQ_DLG_HELPERS_H__
+
+#include "dml_common_defs.h"
+#include "display_mode_lib.h"
+
+/* Function: Printer functions
+ * Print various structs
+ */
+void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param);
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing);
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param);
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param);
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param);
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param);
+
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st data_rq_regs);
+void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs);
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs);
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
new file mode 100644
index 000000000000..1e4b1e383401
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -0,0 +1,1905 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dml1_display_rq_dlg_calc.h"
+#include "display_mode_lib.h"
+
+#include "dml_inline_defs.h"
+
+static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+{
+ unsigned int ret_val = 0;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+ ret_val = 2;
+ } else if (source_format == dm_444_32) {
+ if (!is_chroma)
+ ret_val = 4;
+ } else if (source_format == dm_444_64) {
+ if (!is_chroma)
+ ret_val = 8;
+ } else if (source_format == dm_420_8) {
+ if (is_chroma)
+ ret_val = 2;
+ else
+ ret_val = 1;
+ } else if (source_format == dm_420_10) {
+ if (is_chroma)
+ ret_val = 4;
+ else
+ ret_val = 2;
+ }
+ return ret_val;
+}
+
+static bool is_dual_plane(enum source_format_class source_format)
+{
+ bool ret_val = 0;
+
+ if ((source_format == dm_420_8) || (source_format == dm_420_10))
+ ret_val = 1;
+
+ return ret_val;
+}
+
+static void get_blk256_size(
+ unsigned int *blk256_width,
+ unsigned int *blk256_height,
+ unsigned int bytes_per_element)
+{
+ if (bytes_per_element == 1) {
+ *blk256_width = 16;
+ *blk256_height = 16;
+ } else if (bytes_per_element == 2) {
+ *blk256_width = 16;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 4) {
+ *blk256_width = 8;
+ *blk256_height = 8;
+ } else if (bytes_per_element == 8) {
+ *blk256_width = 8;
+ *blk256_height = 4;
+ }
+}
+
+static double get_refcyc_per_delivery(
+ struct display_mode_lib *mode_lib,
+ double refclk_freq_in_mhz,
+ double pclk_freq_in_mhz,
+ unsigned int recout_width,
+ double vratio,
+ double hscale_pixel_rate,
+ unsigned int delivery_width,
+ unsigned int req_per_swath_ub)
+{
+ double refcyc_per_delivery = 0.0;
+
+ if (vratio <= 1.0) {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) recout_width
+ / pclk_freq_in_mhz / (double) req_per_swath_ub;
+ } else {
+ refcyc_per_delivery = (double) refclk_freq_in_mhz * (double) delivery_width
+ / (double) hscale_pixel_rate / (double) req_per_swath_ub;
+ }
+
+ DTRACE("DLG: %s: refclk_freq_in_mhz = %3.2f", __func__, refclk_freq_in_mhz);
+ DTRACE("DLG: %s: pclk_freq_in_mhz = %3.2f", __func__, pclk_freq_in_mhz);
+ DTRACE("DLG: %s: recout_width = %d", __func__, recout_width);
+ DTRACE("DLG: %s: vratio = %3.2f", __func__, vratio);
+ DTRACE("DLG: %s: req_per_swath_ub = %d", __func__, req_per_swath_ub);
+ DTRACE("DLG: %s: refcyc_per_delivery= %3.2f", __func__, refcyc_per_delivery);
+
+ return refcyc_per_delivery;
+
+}
+
+static double get_vratio_pre(
+ struct display_mode_lib *mode_lib,
+ unsigned int max_num_sw,
+ unsigned int max_partial_sw,
+ unsigned int swath_height,
+ double vinit,
+ double l_sw)
+{
+ double prefill = dml_floor(vinit, 1);
+ double vratio_pre = 1.0;
+
+ vratio_pre = (max_num_sw * swath_height + max_partial_sw) / l_sw;
+
+ if (swath_height > 4) {
+ double tmp0 = (max_num_sw * swath_height) / (l_sw - (prefill - 3.0) / 2.0);
+
+ if (tmp0 > vratio_pre)
+ vratio_pre = tmp0;
+ }
+
+ DTRACE("DLG: %s: max_num_sw = %0d", __func__, max_num_sw);
+ DTRACE("DLG: %s: max_partial_sw = %0d", __func__, max_partial_sw);
+ DTRACE("DLG: %s: swath_height = %0d", __func__, swath_height);
+ DTRACE("DLG: %s: vinit = %3.2f", __func__, vinit);
+ DTRACE("DLG: %s: vratio_pre = %3.2f", __func__, vratio_pre);
+
+ if (vratio_pre < 1.0) {
+ DTRACE("WARNING_DLG: %s: vratio_pre=%3.2f < 1.0, set to 1.0", __func__, vratio_pre);
+ vratio_pre = 1.0;
+ }
+
+ if (vratio_pre > 4.0) {
+ DTRACE(
+ "WARNING_DLG: %s: vratio_pre=%3.2f > 4.0 (max scaling ratio). set to 4.0",
+ __func__,
+ vratio_pre);
+ vratio_pre = 4.0;
+ }
+
+ return vratio_pre;
+}
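A small standalone check of the arithmetic above (illustrative only; the inputs vinit = 3.0, swath_height = 4 and l_sw = 8 prefetch lines are assumptions, the swath_height > 4 refinement is not exercised, and main()/printf merely stand in for the DML plumbing):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double vinit = 3.0, l_sw = 8.0;
	unsigned int swath_height = 4;

	double prefill = floor(vinit);						/* 3 */
	unsigned int max_num_sw =
		(unsigned int)(ceil((prefill - 1.0) / swath_height) + 1.0);	/* 2 */
	unsigned int max_partial_sw = (prefill == 1) ?
		(swath_height - 1) :
		((unsigned int)(prefill - 2.0) % swath_height);			/* 1 */
	double vratio_pre;

	if (max_partial_sw < 1)
		max_partial_sw = 1;

	vratio_pre = (max_num_sw * swath_height + max_partial_sw) / l_sw;
	if (vratio_pre < 1.0)
		vratio_pre = 1.0;	/* lower clamp, as above */
	if (vratio_pre > 4.0)
		vratio_pre = 4.0;	/* max scaling ratio clamp, as above */

	printf("vratio_pre = %.3f\n", vratio_pre);	/* prints 1.125 */
	return 0;
}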
+
+static void get_swath_need(
+ struct display_mode_lib *mode_lib,
+ unsigned int *max_num_sw,
+ unsigned int *max_partial_sw,
+ unsigned int swath_height,
+ double vinit)
+{
+ double prefill = dml_floor(vinit, 1);
+ unsigned int max_partial_sw_int;
+
+ DTRACE("DLG: %s: swath_height = %0d", __func__, swath_height);
+ DTRACE("DLG: %s: vinit = %3.2f", __func__, vinit);
+
+ ASSERT(prefill > 0.0 && prefill <= 8.0);
+
+ *max_num_sw = (unsigned int) (dml_ceil((prefill - 1.0) / (double) swath_height, 1) + 1.0); /* prefill has to be >= 1 */
+ max_partial_sw_int =
+ (prefill == 1) ?
+ (swath_height - 1) :
+ ((unsigned int) (prefill - 2.0) % swath_height);
+ *max_partial_sw = (max_partial_sw_int < 1) ? 1 : max_partial_sw_int; /* ensure minimum of 1 is used */
+
+ DTRACE("DLG: %s: max_num_sw = %0d", __func__, *max_num_sw);
+ DTRACE("DLG: %s: max_partial_sw = %0d", __func__, *max_partial_sw);
+}
+
+static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_size)
+{
+ if (tile_size == dm_256k_tile)
+ return (256 * 1024);
+ else if (tile_size == dm_64k_tile)
+ return (64 * 1024);
+ else
+ return (4 * 1024);
+}
+
+static void extract_rq_sizing_regs(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_data_rq_regs_st *rq_regs,
+ const struct _vcs_dpi_display_data_rq_sizing_params_st rq_sizing)
+{
+ DTRACE("DLG: %s: rq_sizing param", __func__);
+ print__data_rq_sizing_params_st(mode_lib, rq_sizing);
+
+ rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+
+ if (rq_sizing.min_chunk_bytes == 0)
+ rq_regs->min_chunk_size = 0;
+ else
+ rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
+ if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->min_meta_chunk_size = 0;
+ else
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+
+ rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+}
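For a concrete sense of these encodings, here is a standalone sketch that reproduces them for the sizing defaults assigned further down in get_surf_rq_param() (chunk_bytes = 8192, min_chunk_bytes = 1024, meta_chunk_bytes = 2048, min_meta_chunk_bytes = 256, mpte_group_bytes = 2048) plus the full-size dpte_group_bytes = 2048; ilog2_u() is only an illustrative stand-in for dml_log2() on exact powers of two:

#include <stdio.h>

/* Illustrative log2 for exact powers of two, standing in for dml_log2(). */
static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int chunk_bytes = 8192, min_chunk_bytes = 1024;
	unsigned int meta_chunk_bytes = 2048, min_meta_chunk_bytes = 256;
	unsigned int dpte_group_bytes = 2048, mpte_group_bytes = 2048;

	printf("chunk_size          = %u\n", ilog2_u(chunk_bytes) - 10);		/* 3 */
	printf("min_chunk_size      = %u\n", ilog2_u(min_chunk_bytes) - 8 + 1);	/* 3 */
	printf("meta_chunk_size     = %u\n", ilog2_u(meta_chunk_bytes) - 10);		/* 1 */
	printf("min_meta_chunk_size = %u\n", ilog2_u(min_meta_chunk_bytes) - 6 + 1);	/* 3 */
	printf("dpte_group_size     = %u\n", ilog2_u(dpte_group_bytes) - 6);		/* 5 */
	printf("mpte_group_size     = %u\n", ilog2_u(mpte_group_bytes) - 6);		/* 5 */
	return 0;
}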
+
+void dml1_extract_rq_regs(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ const struct _vcs_dpi_display_rq_params_st rq_param)
+{
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+ unsigned int detile_buf_plane1_addr = 0;
+
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ if (rq_param.yuv420)
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+
+ /* FIXME: take the max between luma, chroma chunk size?
+ * okay for now, as we are setting chunk_bytes to 8kb anyway
+ */
+ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
+ rq_regs->drq_expansion_mode = 0;
+ } else {
+ rq_regs->drq_expansion_mode = 2;
+ }
+ rq_regs->prq_expansion_mode = 1;
+ rq_regs->mrq_expansion_mode = 1;
+ rq_regs->crq_expansion_mode = 1;
+
+ if (rq_param.yuv420) {
+ if ((double) rq_param.misc.rq_l.stored_swath_bytes
+ / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); /* half to chroma */
+ } else {
+ detile_buf_plane1_addr = dml_round_to_multiple(
+ (unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
+ 256,
+ 0) / 64.0; /* 2/3 to chroma */
+ }
+ }
+ rq_regs->plane1_base_address = detile_buf_plane1_addr;
+}
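To make the detile buffer split above concrete, a standalone sketch of the two plane1_base_address outcomes; the 164 KB detile buffer size is an assumption (mode_lib->ip.det_buffer_size_kbytes is not shown in this hunk), and the address is in 64-byte units as in the code above:

#include <stdio.h>

int main(void)
{
	unsigned int det_bytes = 164 * 1024;	/* assumed det_buffer_size_kbytes = 164 */

	/* stored luma/chroma swath ratio <= 1.5: give half the buffer to chroma */
	unsigned int plane1_half = det_bytes / 2 / 64;				/* 1312 */

	/* otherwise: the 2/3 point, rounded down to a 256-byte multiple first */
	unsigned int two_thirds = (2 * det_bytes) / 3;				/* 111957 */
	unsigned int plane1_two_thirds = (two_thirds - (two_thirds % 256)) / 64;	/* 1748 */

	printf("plane1_base_address (1:1 split) = %u\n", plane1_half);
	printf("plane1_base_address (2/3 split) = %u\n", plane1_two_thirds);
	return 0;
}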
+
+static void handle_det_buf_split(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_params_st *rq_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+{
+ unsigned int total_swath_bytes = 0;
+ unsigned int swath_bytes_l = 0;
+ unsigned int swath_bytes_c = 0;
+ unsigned int full_swath_bytes_packed_l = 0;
+ unsigned int full_swath_bytes_packed_c = 0;
+ bool req128_l = 0;
+ bool req128_c = 0;
+ bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ unsigned int log2_swath_height_l = 0;
+ unsigned int log2_swath_height_c = 0;
+ unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
+
+ full_swath_bytes_packed_l = rq_param->misc.rq_l.full_swath_bytes;
+ full_swath_bytes_packed_c = rq_param->misc.rq_c.full_swath_bytes;
+
+ if (rq_param->yuv420_10bpc) {
+ full_swath_bytes_packed_l = dml_round_to_multiple(
+ rq_param->misc.rq_l.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ full_swath_bytes_packed_c = dml_round_to_multiple(
+ rq_param->misc.rq_c.full_swath_bytes * 2 / 3,
+ 256,
+ 1) + 256;
+ }
+
+ if (rq_param->yuv420) {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes) { /*full 256b request */
+ req128_l = 0;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ } else { /*128b request (for luma only for yuv420 8bpc) */
+ req128_l = 1;
+ req128_c = 0;
+ swath_bytes_l = full_swath_bytes_packed_l / 2;
+ swath_bytes_c = full_swath_bytes_packed_c;
+ }
+
+ /* Bug workaround, luma and chroma req size needs to be the same. (see: DEGVIDCN10-137)
+ * TODO: Remove after rtl fix
+ */
+ if (req128_l == 1) {
+ req128_c = 1;
+ DTRACE("DLG: %s: bug workaround DEGVIDCN10-137", __func__);
+ }
+
+ /* Note: it is assumed that the config passed in will fit into
+ * the detile buffer.
+ */
+ } else {
+ total_swath_bytes = 2 * full_swath_bytes_packed_l;
+
+ if (total_swath_bytes <= detile_buf_size_in_bytes)
+ req128_l = 0;
+ else
+ req128_l = 1;
+
+ swath_bytes_l = total_swath_bytes;
+ swath_bytes_c = 0;
+ }
+ rq_param->misc.rq_l.stored_swath_bytes = swath_bytes_l;
+ rq_param->misc.rq_c.stored_swath_bytes = swath_bytes_c;
+
+ if (surf_linear) {
+ log2_swath_height_l = 0;
+ log2_swath_height_c = 0;
+ } else if (!surf_vert) {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_height) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_height) - req128_c;
+ } else {
+ log2_swath_height_l = dml_log2(rq_param->misc.rq_l.blk256_width) - req128_l;
+ log2_swath_height_c = dml_log2(rq_param->misc.rq_c.blk256_width) - req128_c;
+ }
+ rq_param->dlg.rq_l.swath_height = 1 << log2_swath_height_l;
+ rq_param->dlg.rq_c.swath_height = 1 << log2_swath_height_c;
+
+ DTRACE("DLG: %s: req128_l = %0d", __func__, req128_l);
+ DTRACE("DLG: %s: req128_c = %0d", __func__, req128_c);
+ DTRACE("DLG: %s: full_swath_bytes_packed_l = %0d", __func__, full_swath_bytes_packed_l);
+ DTRACE("DLG: %s: full_swath_bytes_packed_c = %0d", __func__, full_swath_bytes_packed_c);
+}
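A worked single-plane example of the split above (a sketch; the 4096-wide 32bpp horizontal-scan surface and the 164 KB detile buffer are assumed inputs): once two full swaths no longer fit in the buffer, the request drops to 128B and the swath height is halved:

#include <stdio.h>

int main(void)
{
	unsigned int det_bytes = 164 * 1024;	/* assumed detile buffer size */
	unsigned int blk256_width = 8, blk256_height = 8;	/* 4 bytes/element, tiled */
	unsigned int vp_width = 4096, bytes_per_element = 4;

	/* swath_width_ub as computed in get_surf_rq_param() below */
	unsigned int swath_width_ub =
		((vp_width - 1 + blk256_width - 1) / blk256_width) * blk256_width
		+ blk256_width;							/* 4104 */
	unsigned int full_swath_bytes =
		swath_width_ub * blk256_height * bytes_per_element;		/* 131328 */

	/* single plane: two full swaths must fit, otherwise fall back to 128B requests */
	unsigned int req128 = (2 * full_swath_bytes > det_bytes);
	unsigned int swath_height = blk256_height >> req128;

	printf("full_swath_bytes = %u, req128 = %u, swath_height = %u\n",
	       full_swath_bytes, req128, swath_height);	/* 131328, 1, 4 */
	return 0;
}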
+
+/* Needs refactoring: shares its logic with get_surf_rq_param() below. */
+static void dml1_rq_dlg_get_row_heights(
+ struct display_mode_lib *mode_lib,
+ unsigned int *o_dpte_row_height,
+ unsigned int *o_meta_row_height,
+ unsigned int vp_width,
+ unsigned int data_pitch,
+ int source_format,
+ int tiling,
+ int macro_tile_size,
+ int source_scan,
+ int is_chroma)
+{
+ bool surf_linear = (tiling == dm_sw_linear);
+ bool surf_vert = (source_scan == dm_vert);
+
+ unsigned int bytes_per_element = get_bytes_per_element(
+ (enum source_format_class) source_format,
+ is_chroma);
+ unsigned int log2_bytes_per_element = dml_log2(bytes_per_element);
+ unsigned int blk256_width = 0;
+ unsigned int blk256_height = 0;
+
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int log2_meta_row_height;
+ unsigned int log2_vmpg_bytes;
+ unsigned int dpte_buf_in_pte_reqs;
+ unsigned int log2_vmpg_height;
+ unsigned int log2_vmpg_width;
+ unsigned int log2_dpte_req_height_ptes;
+ unsigned int log2_dpte_req_width_ptes;
+ unsigned int log2_dpte_req_height;
+ unsigned int log2_dpte_req_width;
+ unsigned int log2_dpte_row_height_linear;
+ unsigned int log2_dpte_row_height;
+ unsigned int dpte_req_width;
+
+ if (surf_linear) {
+ blk256_width = 256;
+ blk256_height = 1;
+ } else {
+ get_blk256_size(&blk256_width, &blk256_height, bytes_per_element);
+ }
+
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes = surf_linear ?
+ 256 : get_blk_size_bytes((enum source_macro_tile_size) macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ /* remember log rule
+ * "+" in log is multiply
+ * "-" in log is divide
+ * "/2" is like square root
+ * blk is vertical biased
+ */
+ if (tiling != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; /* blk height of 1 */
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
+
+ /* ------- */
+ /* meta */
+ /* ------- */
+ log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
+
+ /* each 64b meta request for dcn is 8x8 meta elements and
+ * a meta element covers one 256b block of the data surface.
+ */
+ log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 */
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ log2_meta_row_height = 0;
+
+ /* the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ * calculate upper bound of the meta_row_width
+ */
+ if (!surf_vert)
+ log2_meta_row_height = log2_meta_req_height;
+ else
+ log2_meta_row_height = log2_meta_req_width;
+
+ *o_meta_row_height = 1 << log2_meta_row_height;
+
+ /* ------ */
+ /* dpte */
+ /* ------ */
+ log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
+
+ log2_vmpg_height = 0;
+ log2_vmpg_width = 0;
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width_ptes = 0;
+ log2_dpte_req_height = 0;
+ log2_dpte_req_width = 0;
+ log2_dpte_row_height_linear = 0;
+ log2_dpte_row_height = 0;
+ dpte_req_width = 0; /* 64b dpte req width in data element */
+
+ if (surf_linear)
+ log2_vmpg_height = 0; /* one line high */
+ else
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ /* only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4. */
+ if (log2_blk_bytes <= log2_vmpg_bytes)
+ log2_dpte_req_height_ptes = 0;
+ else if (log2_blk_height - log2_vmpg_height >= 2)
+ log2_dpte_req_height_ptes = 2;
+ else
+ log2_dpte_req_height_ptes = log2_blk_height - log2_vmpg_height;
+ log2_dpte_req_width_ptes = 3 - log2_dpte_req_height_ptes;
+
+ ASSERT((log2_dpte_req_width_ptes == 3 && log2_dpte_req_height_ptes == 0) || /* 8x1 */
+ (log2_dpte_req_width_ptes == 2 && log2_dpte_req_height_ptes == 1) || /* 4x2 */
+ (log2_dpte_req_width_ptes == 1 && log2_dpte_req_height_ptes == 2)); /* 2x4 */
+
+ /* The dpte request dimensions in data elements are dpte_req_width x dpte_req_height.
+ * log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte request represents.
+ */
+ log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ /* calculate pitch dpte row buffer can hold
+ * round the result down to a power of two.
+ */
+ if (surf_linear) {
+ log2_dpte_row_height_linear = dml_floor(
+ dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ } else {
+ /* the upper bound of the dpte_row_width without dependency on viewport position follows. */
+ if (!surf_vert)
+ log2_dpte_row_height = log2_dpte_req_height;
+ else
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ }
+
+ /* From programming guide:
+ * There is a special case of saving only half of ptes returned due to buffer space limits.
+ * this case applies to 4 and 8bpe in horizontal access of a vp_width greater than 2560+16
+ * when the pte request is 2x4 ptes (which happens when vmpg_bytes =4kb and tile blk_bytes >=64kb).
+ */
+ if (!surf_vert && vp_width > (2560 + 16) && bytes_per_element >= 4 && log2_vmpg_bytes == 12
+ && log2_blk_bytes >= 16)
+ log2_dpte_row_height = log2_dpte_row_height - 1; /*half of the full height */
+
+ *o_dpte_row_height = 1 << log2_dpte_row_height;
+}
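The "log rule" comment above can be made concrete with one worked case (a sketch; the inputs 32bpp, 64 KB macro tile, 4 KB VM page and horizontal scan are assumptions, and the 2x4 pte request shape is hard-coded because that is what the shape selection above picks for these inputs):

#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned int log2_bpe = 2;		/* log2(4 bytes/element) */
	unsigned int log2_blk256_height = 3;	/* 8x8 256B block for 4 bpe */
	unsigned int log2_blk_bytes = 16;	/* 64 KB macro tile */
	unsigned int log2_vmpg_bytes = 12;	/* 4 KB VM page */

	unsigned int log2_blk_height =
		log2_blk256_height + (unsigned int)ceil((log2_blk_bytes - 8) / 2.0);	/* 7 */
	unsigned int log2_blk_width = log2_blk_bytes - log2_bpe - log2_blk_height;	/* 7 */

	unsigned int log2_meta_req_height = log2_blk256_height + 3;			/* 6 */
	unsigned int log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height; /* 5 */
	unsigned int log2_dpte_req_height = log2_vmpg_height + 2;	/* 2x4 pte request */

	printf("macro tile      : %u x %u elements\n",
	       1u << log2_blk_width, 1u << log2_blk_height);			/* 128 x 128 */
	printf("meta_row_height : %u lines\n", 1u << log2_meta_req_height);	/* 64 */
	printf("dpte_row_height : %u lines\n", 1u << log2_dpte_req_height);	/* 128 */
	return 0;
}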
+
+static void get_surf_rq_param(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing_param,
+ struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param,
+ struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param,
+ bool is_chroma)
+{
+ bool mode_422 = 0;
+ unsigned int vp_width = 0;
+ unsigned int vp_height = 0;
+ unsigned int data_pitch = 0;
+ unsigned int meta_pitch = 0;
+ unsigned int ppe = mode_422 ? 2 : 1;
+ bool surf_linear;
+ bool surf_vert;
+ unsigned int bytes_per_element;
+ unsigned int log2_bytes_per_element;
+ unsigned int blk256_width;
+ unsigned int blk256_height;
+ unsigned int log2_blk256_width;
+ unsigned int log2_blk256_height;
+ unsigned int blk_bytes;
+ unsigned int log2_blk_bytes;
+ unsigned int log2_blk_height;
+ unsigned int log2_blk_width;
+ unsigned int log2_meta_req_bytes;
+ unsigned int log2_meta_req_height;
+ unsigned int log2_meta_req_width;
+ unsigned int meta_req_width;
+ unsigned int meta_req_height;
+ unsigned int log2_meta_row_height;
+ unsigned int meta_row_width_ub;
+ unsigned int log2_meta_chunk_bytes;
+ unsigned int log2_meta_chunk_height;
+ unsigned int log2_meta_chunk_width;
+ unsigned int log2_min_meta_chunk_bytes;
+ unsigned int min_meta_chunk_width;
+ unsigned int meta_chunk_width;
+ unsigned int meta_chunk_per_row_int;
+ unsigned int meta_row_remainder;
+ unsigned int meta_chunk_threshold;
+ unsigned int meta_blk_bytes;
+ unsigned int meta_blk_height;
+ unsigned int meta_blk_width;
+ unsigned int meta_surface_bytes;
+ unsigned int vmpg_bytes;
+ unsigned int meta_pte_req_per_frame_ub;
+ unsigned int meta_pte_bytes_per_frame_ub;
+ unsigned int log2_vmpg_bytes;
+ unsigned int dpte_buf_in_pte_reqs;
+ unsigned int log2_vmpg_height;
+ unsigned int log2_vmpg_width;
+ unsigned int log2_dpte_req_height_ptes;
+ unsigned int log2_dpte_req_width_ptes;
+ unsigned int log2_dpte_req_height;
+ unsigned int log2_dpte_req_width;
+ unsigned int log2_dpte_row_height_linear;
+ unsigned int log2_dpte_row_height;
+ unsigned int log2_dpte_group_width;
+ unsigned int dpte_row_width_ub;
+ unsigned int dpte_row_height;
+ unsigned int dpte_req_height;
+ unsigned int dpte_req_width;
+ unsigned int dpte_group_width;
+ unsigned int log2_dpte_group_bytes;
+ unsigned int log2_dpte_group_length;
+ unsigned int func_meta_row_height, func_dpte_row_height;
+
+ /* FIXME check if ppe apply for both luma and chroma in 422 case */
+ if (is_chroma) {
+ vp_width = pipe_src_param.viewport_width_c / ppe;
+ vp_height = pipe_src_param.viewport_height_c;
+ data_pitch = pipe_src_param.data_pitch_c;
+ meta_pitch = pipe_src_param.meta_pitch_c;
+ } else {
+ vp_width = pipe_src_param.viewport_width / ppe;
+ vp_height = pipe_src_param.viewport_height;
+ data_pitch = pipe_src_param.data_pitch;
+ meta_pitch = pipe_src_param.meta_pitch;
+ }
+
+ rq_sizing_param->chunk_bytes = 8192;
+
+ if (rq_sizing_param->chunk_bytes == 64 * 1024)
+ rq_sizing_param->min_chunk_bytes = 0;
+ else
+ rq_sizing_param->min_chunk_bytes = 1024;
+
+ rq_sizing_param->meta_chunk_bytes = 2048;
+ rq_sizing_param->min_meta_chunk_bytes = 256;
+
+ rq_sizing_param->mpte_group_bytes = 2048;
+
+ surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
+ surf_vert = (pipe_src_param.source_scan == dm_vert);
+
+ bytes_per_element = get_bytes_per_element(
+ (enum source_format_class) pipe_src_param.source_format,
+ is_chroma);
+ log2_bytes_per_element = dml_log2(bytes_per_element);
+ blk256_width = 0;
+ blk256_height = 0;
+
+ if (surf_linear) {
+ blk256_width = 256 / bytes_per_element;
+ blk256_height = 1;
+ } else {
+ get_blk256_size(&blk256_width, &blk256_height, bytes_per_element);
+ }
+
+ DTRACE("DLG: %s: surf_linear = %d", __func__, surf_linear);
+ DTRACE("DLG: %s: surf_vert = %d", __func__, surf_vert);
+ DTRACE("DLG: %s: blk256_width = %d", __func__, blk256_width);
+ DTRACE("DLG: %s: blk256_height = %d", __func__, blk256_height);
+
+ log2_blk256_width = dml_log2((double) blk256_width);
+ log2_blk256_height = dml_log2((double) blk256_height);
+ blk_bytes =
+ surf_linear ? 256 : get_blk_size_bytes(
+ (enum source_macro_tile_size) pipe_src_param.macro_tile_size);
+ log2_blk_bytes = dml_log2((double) blk_bytes);
+ log2_blk_height = 0;
+ log2_blk_width = 0;
+
+ /* remember log rule
+ * "+" in log is multiply
+ * "-" in log is divide
+ * "/2" is like square root
+ * blk is vertical biased
+ */
+ if (pipe_src_param.sw_mode != dm_sw_linear)
+ log2_blk_height = log2_blk256_height
+ + dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
+ else
+ log2_blk_height = 0; /* blk height of 1 */
+
+ log2_blk_width = log2_blk_bytes - log2_bytes_per_element - log2_blk_height;
+
+ if (!surf_vert) {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(vp_width - 1, blk256_width, 1)
+ + blk256_width;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_width;
+ } else {
+ rq_dlg_param->swath_width_ub = dml_round_to_multiple(
+ vp_height - 1,
+ blk256_height,
+ 1) + blk256_height;
+ rq_dlg_param->req_per_swath_ub = rq_dlg_param->swath_width_ub >> log2_blk256_height;
+ }
+
+ if (!surf_vert)
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_height
+ * bytes_per_element;
+ else
+ rq_misc_param->full_swath_bytes = rq_dlg_param->swath_width_ub * blk256_width
+ * bytes_per_element;
+
+ rq_misc_param->blk256_height = blk256_height;
+ rq_misc_param->blk256_width = blk256_width;
+
+ /* ------- */
+ /* meta */
+ /* ------- */
+ log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
+
+ /* each 64b meta request for dcn is 8x8 meta elements and
+ * a meta element covers one 256b block of the data surface.
+ */
+ log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 bytes, each byte represents 1 blk256 */
+ log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
+ - log2_meta_req_height;
+ meta_req_width = 1 << log2_meta_req_width;
+ meta_req_height = 1 << log2_meta_req_height;
+ log2_meta_row_height = 0;
+ meta_row_width_ub = 0;
+
+ /* the dimensions of a meta row are meta_row_width x meta_row_height in elements.
+ * calculate upper bound of the meta_row_width
+ */
+ if (!surf_vert) {
+ log2_meta_row_height = log2_meta_req_height;
+ meta_row_width_ub = dml_round_to_multiple(vp_width - 1, meta_req_width, 1)
+ + meta_req_width;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_width;
+ } else {
+ log2_meta_row_height = log2_meta_req_width;
+ meta_row_width_ub = dml_round_to_multiple(vp_height - 1, meta_req_height, 1)
+ + meta_req_height;
+ rq_dlg_param->meta_req_per_row_ub = meta_row_width_ub / meta_req_height;
+ }
+ rq_dlg_param->meta_bytes_per_row_ub = rq_dlg_param->meta_req_per_row_ub * 64;
+
+ log2_meta_chunk_bytes = dml_log2(rq_sizing_param->meta_chunk_bytes);
+ log2_meta_chunk_height = log2_meta_row_height;
+
+ /*full sized meta chunk width in unit of data elements */
+ log2_meta_chunk_width = log2_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height;
+ log2_min_meta_chunk_bytes = dml_log2(rq_sizing_param->min_meta_chunk_bytes);
+ min_meta_chunk_width = 1
+ << (log2_min_meta_chunk_bytes + 8 - log2_bytes_per_element
+ - log2_meta_chunk_height);
+ meta_chunk_width = 1 << log2_meta_chunk_width;
+ meta_chunk_per_row_int = (unsigned int) (meta_row_width_ub / meta_chunk_width);
+ meta_row_remainder = meta_row_width_ub % meta_chunk_width;
+ meta_chunk_threshold = 0;
+ meta_blk_bytes = 4096;
+ meta_blk_height = blk256_height * 64;
+ meta_blk_width = meta_blk_bytes * 256 / bytes_per_element / meta_blk_height;
+ meta_surface_bytes = meta_pitch
+ * (dml_round_to_multiple(vp_height - 1, meta_blk_height, 1)
+ + meta_blk_height) * bytes_per_element / 256;
+ vmpg_bytes = mode_lib->soc.vmm_page_size_bytes;
+ meta_pte_req_per_frame_ub = (dml_round_to_multiple(
+ meta_surface_bytes - vmpg_bytes,
+ 8 * vmpg_bytes,
+ 1) + 8 * vmpg_bytes) / (8 * vmpg_bytes);
+ meta_pte_bytes_per_frame_ub = meta_pte_req_per_frame_ub * 64; /*64B mpte request */
+ rq_dlg_param->meta_pte_bytes_per_frame_ub = meta_pte_bytes_per_frame_ub;
+
+ DTRACE("DLG: %s: meta_blk_height = %d", __func__, meta_blk_height);
+ DTRACE("DLG: %s: meta_blk_width = %d", __func__, meta_blk_width);
+ DTRACE("DLG: %s: meta_surface_bytes = %d", __func__, meta_surface_bytes);
+ DTRACE("DLG: %s: meta_pte_req_per_frame_ub = %d", __func__, meta_pte_req_per_frame_ub);
+ DTRACE("DLG: %s: meta_pte_bytes_per_frame_ub = %d", __func__, meta_pte_bytes_per_frame_ub);
+
+ if (!surf_vert)
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_width;
+ else
+ meta_chunk_threshold = 2 * min_meta_chunk_width - meta_req_height;
+
+ if (meta_row_remainder <= meta_chunk_threshold)
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 1;
+ else
+ rq_dlg_param->meta_chunks_per_row_ub = meta_chunk_per_row_int + 2;
+
+ rq_dlg_param->meta_row_height = 1 << log2_meta_row_height;
+
+ /* ------ */
+ /* dpte */
+ /* ------ */
+ log2_vmpg_bytes = dml_log2(mode_lib->soc.vmm_page_size_bytes);
+ dpte_buf_in_pte_reqs = mode_lib->ip.dpte_buffer_size_in_pte_reqs;
+
+ log2_vmpg_height = 0;
+ log2_vmpg_width = 0;
+ log2_dpte_req_height_ptes = 0;
+ log2_dpte_req_width_ptes = 0;
+ log2_dpte_req_height = 0;
+ log2_dpte_req_width = 0;
+ log2_dpte_row_height_linear = 0;
+ log2_dpte_row_height = 0;
+ log2_dpte_group_width = 0;
+ dpte_row_width_ub = 0;
+ dpte_row_height = 0;
+ dpte_req_height = 0; /* 64b dpte req height in data element */
+ dpte_req_width = 0; /* 64b dpte req width in data element */
+ dpte_group_width = 0;
+ log2_dpte_group_bytes = 0;
+ log2_dpte_group_length = 0;
+
+ if (surf_linear)
+ log2_vmpg_height = 0; /* one line high */
+ else
+ log2_vmpg_height = (log2_vmpg_bytes - 8) / 2 + log2_blk256_height;
+ log2_vmpg_width = log2_vmpg_bytes - log2_bytes_per_element - log2_vmpg_height;
+
+ /* only 3 possible shapes for dpte request in dimensions of ptes: 8x1, 4x2, 2x4. */
+ if (log2_blk_bytes <= log2_vmpg_bytes)
+ log2_dpte_req_height_ptes = 0;
+ else if (log2_blk_height - log2_vmpg_height >= 2)
+ log2_dpte_req_height_ptes = 2;
+ else
+ log2_dpte_req_height_ptes = log2_blk_height - log2_vmpg_height;
+ log2_dpte_req_width_ptes = 3 - log2_dpte_req_height_ptes;
+
+ /* Ensure we only have the 3 shapes */
+ ASSERT((log2_dpte_req_width_ptes == 3 && log2_dpte_req_height_ptes == 0) || /* 8x1 */
+ (log2_dpte_req_width_ptes == 2 && log2_dpte_req_height_ptes == 1) || /* 4x2 */
+ (log2_dpte_req_width_ptes == 1 && log2_dpte_req_height_ptes == 2)); /* 2x4 */
+
+ /* The dpte request dimensions in data elements are dpte_req_width x dpte_req_height.
+ * log2_vmpg_width is how much one pte represents; now calculate how much a 64b pte request
+ * represents, which depends on the pte shape (i.e. 8x1, 4x2, 2x4).
+ */
+ log2_dpte_req_height = log2_vmpg_height + log2_dpte_req_height_ptes;
+ log2_dpte_req_width = log2_vmpg_width + log2_dpte_req_width_ptes;
+ dpte_req_height = 1 << log2_dpte_req_height;
+ dpte_req_width = 1 << log2_dpte_req_width;
+
+ /* calculate pitch dpte row buffer can hold
+ * round the result down to a power of two.
+ */
+ if (surf_linear) {
+ log2_dpte_row_height_linear = dml_floor(
+ dml_log2(dpte_buf_in_pte_reqs * dpte_req_width / data_pitch),
+ 1);
+
+ ASSERT(log2_dpte_row_height_linear >= 3);
+
+ if (log2_dpte_row_height_linear > 7)
+ log2_dpte_row_height_linear = 7;
+
+ log2_dpte_row_height = log2_dpte_row_height_linear;
+ dpte_row_height = 1 << log2_dpte_row_height;
+ rq_dlg_param->dpte_row_height = dpte_row_height;
+
+ /* For linear, the dpte row is pitch dependent and the pte requests wrap at the pitch boundary.
+ * the dpte_row_width_ub is the upper bound of data_pitch*dpte_row_height in elements with this unique buffering.
+ */
+ dpte_row_width_ub = dml_round_to_multiple(
+ data_pitch * dpte_row_height - 1,
+ dpte_req_width,
+ 1) + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ /* for tiled mode, row height is the same as req height and row store up to vp size upper bound */
+ if (!surf_vert) {
+ log2_dpte_row_height = log2_dpte_req_height;
+ dpte_row_width_ub = dml_round_to_multiple(vp_width - 1, dpte_req_width, 1)
+ + dpte_req_width;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_width;
+ } else {
+ log2_dpte_row_height =
+ (log2_blk_width < log2_dpte_req_width) ?
+ log2_blk_width : log2_dpte_req_width;
+ dpte_row_width_ub = dml_round_to_multiple(vp_height - 1, dpte_req_height, 1)
+ + dpte_req_height;
+ rq_dlg_param->dpte_req_per_row_ub = dpte_row_width_ub / dpte_req_height;
+ }
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+ }
+ rq_dlg_param->dpte_bytes_per_row_ub = rq_dlg_param->dpte_req_per_row_ub * 64;
+
+ /* From programming guide:
+ * There is a special case of saving only half of ptes returned due to buffer space limits.
+ * this case applies to 4 and 8bpe in horizontal access of a vp_width greater than 2560+16
+ * when the pte request is 2x4 ptes (which happens when vmpg_bytes =4kb and tile blk_bytes >=64kb).
+ */
+ if (!surf_vert && vp_width > (2560 + 16) && bytes_per_element >= 4 && log2_vmpg_bytes == 12
+ && log2_blk_bytes >= 16) {
+ log2_dpte_row_height = log2_dpte_row_height - 1; /*half of the full height */
+ rq_dlg_param->dpte_row_height = 1 << log2_dpte_row_height;
+ }
+
+ /* dpte_group_bytes is reduced for the specific case of vertical
+ * access of a tiled surface that has a dpte request shape of 8x1 ptes.
+ */
+ if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) /*reduced, in this case, will have page fault within a group */
+ rq_sizing_param->dpte_group_bytes = 512;
+ else
+ /*full size */
+ rq_sizing_param->dpte_group_bytes = 2048;
+
+ /*since pte request size is 64byte, the number of data pte requests per full sized group is as follows. */
+ log2_dpte_group_bytes = dml_log2(rq_sizing_param->dpte_group_bytes);
+ log2_dpte_group_length = log2_dpte_group_bytes - 6; /*length in 64b requests */
+
+ /* full sized data pte group width in elements */
+ if (!surf_vert)
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_width;
+ else
+ log2_dpte_group_width = log2_dpte_group_length + log2_dpte_req_height;
+
+ dpte_group_width = 1 << log2_dpte_group_width;
+
+ /* since dpte groups are only aligned to dpte_req_width and not dpte_group_width,
+ * the upper bound for the dpte groups per row is as follows.
+ */
+ rq_dlg_param->dpte_groups_per_row_ub = dml_ceil(
+ (double) dpte_row_width_ub / dpte_group_width,
+ 1);
+
+ dml1_rq_dlg_get_row_heights(
+ mode_lib,
+ &func_dpte_row_height,
+ &func_meta_row_height,
+ vp_width,
+ data_pitch,
+ pipe_src_param.source_format,
+ pipe_src_param.sw_mode,
+ pipe_src_param.macro_tile_size,
+ pipe_src_param.source_scan,
+ is_chroma);
+
+ /* Just a check to make sure this function and dml1_rq_dlg_get_row_heights() give
+ * the same result. The standalone function is based on the code in this
+ * function, so the same changes need to be made to both.
+ */
+ if (rq_dlg_param->meta_row_height != func_meta_row_height) {
+ DTRACE(
+ "MISMATCH: rq_dlg_param->meta_row_height = %d",
+ rq_dlg_param->meta_row_height);
+ DTRACE("MISMATCH: func_meta_row_height = %d", func_meta_row_height);
+ ASSERT(0);
+ }
+
+ if (rq_dlg_param->dpte_row_height != func_dpte_row_height) {
+ DTRACE(
+ "MISMATCH: rq_dlg_param->dpte_row_height = %d",
+ rq_dlg_param->dpte_row_height);
+ DTRACE("MISMATCH: func_dpte_row_height = %d", func_dpte_row_height);
+ ASSERT(0);
+ }
+}
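The "half of the ptes" special case near the end of the function can be isolated in a tiny sketch (assumed inputs: horizontal scan, 3840-wide viewport, 4 bytes/element, 4 KB VM page, 64 KB tile, starting from the 128-line dpte row of the previous example):

#include <stdio.h>

int main(void)
{
	unsigned int vp_width = 3840, bytes_per_element = 4;
	unsigned int log2_vmpg_bytes = 12, log2_blk_bytes = 16;
	unsigned int log2_dpte_row_height = 7;	/* 128 lines before the special case */

	/* horizontal scan assumed (the !surf_vert condition above) */
	if (vp_width > (2560 + 16) && bytes_per_element >= 4 &&
	    log2_vmpg_bytes == 12 && log2_blk_bytes >= 16)
		log2_dpte_row_height--;	/* only half of the returned ptes are stored */

	printf("dpte_row_height = %u lines\n", 1u << log2_dpte_row_height);	/* 64 */
	return 0;
}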
+
+void dml1_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_params_st *rq_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+{
+ /* get param for luma surface */
+ rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
+ || pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+
+ get_surf_rq_param(
+ mode_lib,
+ &(rq_param->sizing.rq_l),
+ &(rq_param->dlg.rq_l),
+ &(rq_param->misc.rq_l),
+ pipe_src_param,
+ 0);
+
+ if (is_dual_plane((enum source_format_class) pipe_src_param.source_format)) {
+ /* get param for chroma surface */
+ get_surf_rq_param(
+ mode_lib,
+ &(rq_param->sizing.rq_c),
+ &(rq_param->dlg.rq_c),
+ &(rq_param->misc.rq_c),
+ pipe_src_param,
+ 1);
+ }
+
+ /* calculate how to split the det buffer space between luma and chroma */
+ handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
+ print__rq_params_st(mode_lib, *rq_param);
+}
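A hedged sketch (not part of the patch) of how a caller might chain the two public entry points in this file, assuming mode_lib and the pipe source parameters were populated elsewhere by the driver:

static void example_get_rq_regs(struct display_mode_lib *mode_lib,
		const struct _vcs_dpi_display_pipe_source_params_st pipe_src)
{
	struct _vcs_dpi_display_rq_params_st rq_param = {0};
	struct _vcs_dpi_display_rq_regs_st rq_regs = {0};

	/* Derive swath/meta/pte parameters from the surface description... */
	dml1_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_src);

	/* ...then encode them into the RQ register field values. */
	dml1_extract_rq_regs(mode_lib, &rq_regs, rq_param);

	/* rq_regs now holds the encoded register fields for the caller to use. */
	(void)rq_regs;
}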
+
+/* Note: currently taken in as is.
+ * It would be nice to decouple this code from the hw register implementation and
+ * to factor out the code that is repeated for luma and chroma.
+ */
+void dml1_rq_dlg_get_dlg_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_dlg_regs_st *disp_dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *disp_ttu_regs,
+ const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool iflip_en)
+{
+ /* Timing */
+ unsigned int htotal = e2e_pipe_param.pipe.dest.htotal;
+ unsigned int hblank_end = e2e_pipe_param.pipe.dest.hblank_end;
+ unsigned int vblank_start = e2e_pipe_param.pipe.dest.vblank_start;
+ unsigned int vblank_end = e2e_pipe_param.pipe.dest.vblank_end;
+ bool interlaced = e2e_pipe_param.pipe.dest.interlaced;
+ unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
+
+ double pclk_freq_in_mhz = e2e_pipe_param.pipe.dest.pixel_rate_mhz;
+ double refclk_freq_in_mhz = e2e_pipe_param.clks_cfg.refclk_mhz;
+ double dppclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dppclk_mhz;
+ double dispclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dispclk_mhz;
+
+ double ref_freq_to_pix_freq;
+ double prefetch_xy_calc_in_dcfclk;
+ double min_dcfclk_mhz;
+ double t_calc_us;
+ double min_ttu_vblank;
+ double min_dst_y_ttu_vblank;
+ unsigned int dlg_vblank_start;
+ bool dcc_en;
+ bool dual_plane;
+ bool mode_422;
+ unsigned int access_dir;
+ unsigned int bytes_per_element_l;
+ unsigned int bytes_per_element_c;
+ unsigned int vp_height_l;
+ unsigned int vp_width_l;
+ unsigned int vp_height_c;
+ unsigned int vp_width_c;
+ unsigned int htaps_l;
+ unsigned int htaps_c;
+ double hratios_l;
+ double hratios_c;
+ double vratio_l;
+ double vratio_c;
+ double line_time_in_us;
+ double vinit_l;
+ double vinit_c;
+ double vinit_bot_l;
+ double vinit_bot_c;
+ unsigned int swath_height_l;
+ unsigned int swath_width_ub_l;
+ unsigned int dpte_bytes_per_row_ub_l;
+ unsigned int dpte_groups_per_row_ub_l;
+ unsigned int meta_pte_bytes_per_frame_ub_l;
+ unsigned int meta_bytes_per_row_ub_l;
+ unsigned int swath_height_c;
+ unsigned int swath_width_ub_c;
+ unsigned int dpte_bytes_per_row_ub_c;
+ unsigned int dpte_groups_per_row_ub_c;
+ unsigned int meta_chunks_per_row_ub_l;
+ unsigned int vupdate_offset;
+ unsigned int vupdate_width;
+ unsigned int vready_offset;
+ unsigned int dppclk_delay_subtotal;
+ unsigned int dispclk_delay_subtotal;
+ unsigned int pixel_rate_delay_subtotal;
+ unsigned int vstartup_start;
+ unsigned int dst_x_after_scaler;
+ unsigned int dst_y_after_scaler;
+ double line_wait;
+ double line_o;
+ double line_setup;
+ double line_calc;
+ double dst_y_prefetch;
+ double t_pre_us;
+ unsigned int vm_bytes;
+ unsigned int meta_row_bytes;
+ unsigned int max_num_sw_l;
+ unsigned int max_num_sw_c;
+ unsigned int max_partial_sw_l;
+ unsigned int max_partial_sw_c;
+ double max_vinit_l;
+ double max_vinit_c;
+ unsigned int lsw_l;
+ unsigned int lsw_c;
+ unsigned int sw_bytes_ub_l;
+ unsigned int sw_bytes_ub_c;
+ unsigned int sw_bytes;
+ unsigned int dpte_row_bytes;
+ double prefetch_bw;
+ double flip_bw;
+ double t_vm_us;
+ double t_r0_us;
+ double dst_y_per_vm_vblank;
+ double dst_y_per_row_vblank;
+ double min_dst_y_per_vm_vblank;
+ double min_dst_y_per_row_vblank;
+ double lsw;
+ double vratio_pre_l;
+ double vratio_pre_c;
+ unsigned int req_per_swath_ub_l;
+ unsigned int req_per_swath_ub_c;
+ unsigned int meta_row_height_l;
+ unsigned int swath_width_pixels_ub_l;
+ unsigned int swath_width_pixels_ub_c;
+ unsigned int scaler_rec_in_width_l;
+ unsigned int scaler_rec_in_width_c;
+ unsigned int dpte_row_height_l;
+ unsigned int dpte_row_height_c;
+ double hscale_pixel_rate_l;
+ double hscale_pixel_rate_c;
+ double min_hratio_fact_l;
+ double min_hratio_fact_c;
+ double refcyc_per_line_delivery_pre_l;
+ double refcyc_per_line_delivery_pre_c;
+ double refcyc_per_line_delivery_l;
+ double refcyc_per_line_delivery_c;
+ double refcyc_per_req_delivery_pre_l;
+ double refcyc_per_req_delivery_pre_c;
+ double refcyc_per_req_delivery_l;
+ double refcyc_per_req_delivery_c;
+ double refcyc_per_req_delivery_pre_cur0;
+ double refcyc_per_req_delivery_cur0;
+ unsigned int full_recout_width;
+ double hratios_cur0;
+ unsigned int cur0_src_width;
+ enum cursor_bpp cur0_bpp;
+ unsigned int cur0_req_size;
+ unsigned int cur0_req_width;
+ double cur0_width_ub;
+ double cur0_req_per_width;
+ double hactive_cur0;
+
+ memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
+ memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
+
+ DTRACE("DLG: %s: cstate_en = %d", __func__, cstate_en);
+ DTRACE("DLG: %s: pstate_en = %d", __func__, pstate_en);
+ DTRACE("DLG: %s: vm_en = %d", __func__, vm_en);
+ DTRACE("DLG: %s: iflip_en = %d", __func__, iflip_en);
+
+ /* ------------------------- */
+ /* Section 1.5.2.1: OTG dependent Params */
+ /* ------------------------- */
+ DTRACE("DLG: %s: dppclk_freq_in_mhz = %3.2f", __func__, dppclk_freq_in_mhz);
+ DTRACE("DLG: %s: dispclk_freq_in_mhz = %3.2f", __func__, dispclk_freq_in_mhz);
+ DTRACE("DLG: %s: refclk_freq_in_mhz = %3.2f", __func__, refclk_freq_in_mhz);
+ DTRACE("DLG: %s: pclk_freq_in_mhz = %3.2f", __func__, pclk_freq_in_mhz);
+ DTRACE("DLG: %s: interlaced = %d", __func__, interlaced);
+
+ ref_freq_to_pix_freq = refclk_freq_in_mhz / pclk_freq_in_mhz;
+ ASSERT(ref_freq_to_pix_freq < 4.0);
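+	/* These timing ratios are programmed in fixed point: ref_freq_to_pix_freq
+	 * with 19 fractional bits and refcyc_per_htotal with 8 fractional bits,
+	 * hence the dml_pow(2, 19) and dml_pow(2, 8) scale factors below.
+	 */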
+ disp_dlg_regs->ref_freq_to_pix_freq =
+ (unsigned int) (ref_freq_to_pix_freq * dml_pow(2, 19));
+ disp_dlg_regs->refcyc_per_htotal = (unsigned int) (ref_freq_to_pix_freq * (double) htotal
+ * dml_pow(2, 8));
+ disp_dlg_regs->refcyc_h_blank_end = (unsigned int) ((double) hblank_end
+ * (double) ref_freq_to_pix_freq);
+ ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
+ disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
+
+ prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */
+ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
+ min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
+ if (cstate_en)
+ min_ttu_vblank = dml_max(dlg_sys_param.t_sr_wm_us, min_ttu_vblank);
+ if (pstate_en)
+ min_ttu_vblank = dml_max(dlg_sys_param.t_mclk_wm_us, min_ttu_vblank);
+ min_ttu_vblank = min_ttu_vblank + t_calc_us;
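+	/* In other words: min_ttu_vblank = max(t_urg_wm, t_sr_wm if cstate_en,
+	 * t_mclk_wm if pstate_en) + t_calc, where t_calc covers the 24 dcfclk
+	 * cycles of prefetch x/y calculation at the deepsleep dcfclk.
+	 */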
+
+ min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
+ dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
+
+ disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start
+ + min_dst_y_ttu_vblank) * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int) dml_pow(2, 18));
+
+ DTRACE("DLG: %s: min_dcfclk_mhz = %3.2f", __func__, min_dcfclk_mhz);
+ DTRACE("DLG: %s: min_ttu_vblank = %3.2f", __func__, min_ttu_vblank);
+ DTRACE(
+ "DLG: %s: min_dst_y_ttu_vblank = %3.2f",
+ __func__,
+ min_dst_y_ttu_vblank);
+ DTRACE("DLG: %s: t_calc_us = %3.2f", __func__, t_calc_us);
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->min_dst_y_next_start = 0x%0x",
+ __func__,
+ disp_dlg_regs->min_dst_y_next_start);
+ DTRACE(
+ "DLG: %s: ref_freq_to_pix_freq = %3.2f",
+ __func__,
+ ref_freq_to_pix_freq);
+
+ /* ------------------------- */
+ /* Section 1.5.2.2: Prefetch, Active and TTU */
+ /* ------------------------- */
+ /* Prefetch Calc */
+ /* Source */
+ dcc_en = e2e_pipe_param.pipe.src.dcc;
+ dual_plane = is_dual_plane(
+ (enum source_format_class) e2e_pipe_param.pipe.src.source_format);
+ mode_422 = 0; /* FIXME */
+	access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* viewport access direction: horizontally or vertically accessed */
+ bytes_per_element_l = get_bytes_per_element(
+ (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ 0);
+ bytes_per_element_c = get_bytes_per_element(
+ (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ 1);
+ vp_height_l = e2e_pipe_param.pipe.src.viewport_height;
+ vp_width_l = e2e_pipe_param.pipe.src.viewport_width;
+ vp_height_c = e2e_pipe_param.pipe.src.viewport_height_c;
+ vp_width_c = e2e_pipe_param.pipe.src.viewport_width_c;
+
+ /* Scaling */
+ htaps_l = e2e_pipe_param.pipe.scale_taps.htaps;
+ htaps_c = e2e_pipe_param.pipe.scale_taps.htaps_c;
+ hratios_l = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
+ hratios_c = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio_c;
+ vratio_l = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio;
+ vratio_c = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio_c;
+
+ line_time_in_us = (htotal / pclk_freq_in_mhz);
+ vinit_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit;
+ vinit_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_c;
+ vinit_bot_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot;
+ vinit_bot_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot_c;
+
+ swath_height_l = rq_dlg_param.rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
+ dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
+ meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
+ meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+
+ swath_height_c = rq_dlg_param.rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
+ dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
+ vupdate_offset = e2e_pipe_param.pipe.dest.vupdate_offset;
+ vupdate_width = e2e_pipe_param.pipe.dest.vupdate_width;
+ vready_offset = e2e_pipe_param.pipe.dest.vready_offset;
+
+ dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
+ dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
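+	/* Convert the dppclk/dispclk pipeline delays into pixel clocks so they
+	 * can be added to positions expressed in pixels of the destination timing.
+	 */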
+ pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ + dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
+
+ vstartup_start = e2e_pipe_param.pipe.dest.vstartup_start;
+
+ if (interlaced)
+ vstartup_start = vstartup_start / 2;
+
+ if (vstartup_start >= min_vblank) {
+ DTRACE(
+ "WARNING_DLG: %s: vblank_start=%d vblank_end=%d",
+ __func__,
+ vblank_start,
+ vblank_end);
+ DTRACE(
+ "WARNING_DLG: %s: vstartup_start=%d should be less than min_vblank=%d",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ min_vblank = vstartup_start + 1;
+ DTRACE(
+ "WARNING_DLG: %s: vstartup_start=%d should be less than min_vblank=%d",
+ __func__,
+ vstartup_start,
+ min_vblank);
+ }
+
+ dst_x_after_scaler = 0;
+ dst_y_after_scaler = 0;
+
+ if (e2e_pipe_param.pipe.src.is_hsplit)
+ dst_x_after_scaler = pixel_rate_delay_subtotal
+ + e2e_pipe_param.pipe.dest.recout_width;
+ else
+ dst_x_after_scaler = pixel_rate_delay_subtotal;
+
+ if (e2e_pipe_param.dout.output_format == dm_420)
+ dst_y_after_scaler = 1;
+ else
+ dst_y_after_scaler = 0;
+
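+	/* If the position after the scaler spills past the end of the line,
+	 * carry the overflow into the next destination line.
+	 */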
+ if (dst_x_after_scaler >= htotal) {
+ dst_x_after_scaler = dst_x_after_scaler - htotal;
+ dst_y_after_scaler = dst_y_after_scaler + 1;
+ }
+
+ DTRACE("DLG: %s: htotal = %d", __func__, htotal);
+ DTRACE(
+ "DLG: %s: pixel_rate_delay_subtotal = %d",
+ __func__,
+ pixel_rate_delay_subtotal);
+ DTRACE("DLG: %s: dst_x_after_scaler = %d", __func__, dst_x_after_scaler);
+ DTRACE("DLG: %s: dst_y_after_scaler = %d", __func__, dst_y_after_scaler);
+
+ line_wait = mode_lib->soc.urgent_latency_us;
+ if (cstate_en)
+ line_wait = dml_max(mode_lib->soc.sr_enter_plus_exit_time_us, line_wait);
+ if (pstate_en)
+ line_wait = dml_max(
+ mode_lib->soc.dram_clock_change_latency_us
+ + mode_lib->soc.urgent_latency_us,
+ line_wait);
+ line_wait = line_wait / line_time_in_us;
+
+ line_o = (double) dst_y_after_scaler + dst_x_after_scaler / (double) htotal;
+ line_setup = (double) (vupdate_offset + vupdate_width + vready_offset) / (double) htotal;
+ line_calc = t_calc_us / line_time_in_us;
+
+ DTRACE(
+ "DLG: %s: soc.sr_enter_plus_exit_time_us = %3.2f",
+ __func__,
+ (double) mode_lib->soc.sr_enter_plus_exit_time_us);
+ DTRACE(
+ "DLG: %s: soc.dram_clock_change_latency_us = %3.2f",
+ __func__,
+ (double) mode_lib->soc.dram_clock_change_latency_us);
+ DTRACE(
+ "DLG: %s: soc.urgent_latency_us = %3.2f",
+ __func__,
+ mode_lib->soc.urgent_latency_us);
+
+ DTRACE("DLG: %s: swath_height_l = %d", __func__, swath_height_l);
+ if (dual_plane)
+ DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c);
+
+ DTRACE(
+ "DLG: %s: t_srx_delay_us = %3.2f",
+ __func__,
+ (double) dlg_sys_param.t_srx_delay_us);
+ DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
+ DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
+ DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
+ DTRACE("DLG: %s: vready_offset = %d", __func__, vready_offset);
+ DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, line_time_in_us);
+ DTRACE("DLG: %s: line_wait = %3.2f", __func__, line_wait);
+ DTRACE("DLG: %s: line_o = %3.2f", __func__, line_o);
+ DTRACE("DLG: %s: line_setup = %3.2f", __func__, line_setup);
+ DTRACE("DLG: %s: line_calc = %3.2f", __func__, line_calc);
+
+ dst_y_prefetch = ((double) min_vblank - 1.0)
+ - (line_setup + line_calc + line_wait + line_o);
+ DTRACE("DLG: %s: dst_y_prefetch (before rnd) = %3.2f", __func__, dst_y_prefetch);
+ ASSERT(dst_y_prefetch >= 2.0);
+
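+	/* Quantize to quarter lines: dml_floor(4.0 * (x + 0.125), 1) / 4 rounds
+	 * x to the nearest 0.25, e.g. 10.30 becomes 10.25 and 10.40 becomes 10.50.
+	 */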
+ dst_y_prefetch = dml_floor(4.0 * (dst_y_prefetch + 0.125), 1) / 4;
+ DTRACE("DLG: %s: dst_y_prefetch (after rnd) = %3.2f", __func__, dst_y_prefetch);
+
+ t_pre_us = dst_y_prefetch * line_time_in_us;
+ vm_bytes = 0;
+ meta_row_bytes = 0;
+
+ if (dcc_en && vm_en)
+ vm_bytes = meta_pte_bytes_per_frame_ub_l;
+ if (dcc_en)
+ meta_row_bytes = meta_bytes_per_row_ub_l;
+
+ max_num_sw_l = 0;
+ max_num_sw_c = 0;
+ max_partial_sw_l = 0;
+ max_partial_sw_c = 0;
+
+ max_vinit_l = interlaced ? dml_max(vinit_l, vinit_bot_l) : vinit_l;
+ max_vinit_c = interlaced ? dml_max(vinit_c, vinit_bot_c) : vinit_c;
+
+ get_swath_need(mode_lib, &max_num_sw_l, &max_partial_sw_l, swath_height_l, max_vinit_l);
+ if (dual_plane)
+ get_swath_need(
+ mode_lib,
+ &max_num_sw_c,
+ &max_partial_sw_c,
+ swath_height_c,
+ max_vinit_c);
+
+ lsw_l = max_num_sw_l * swath_height_l + max_partial_sw_l;
+ lsw_c = max_num_sw_c * swath_height_c + max_partial_sw_c;
+ sw_bytes_ub_l = lsw_l * swath_width_ub_l * bytes_per_element_l;
+ sw_bytes_ub_c = lsw_c * swath_width_ub_c * bytes_per_element_c;
+ sw_bytes = 0;
+ dpte_row_bytes = 0;
+
+ if (vm_en) {
+ if (dual_plane)
+ dpte_row_bytes = dpte_bytes_per_row_ub_l + dpte_bytes_per_row_ub_c;
+ else
+ dpte_row_bytes = dpte_bytes_per_row_ub_l;
+ } else {
+ dpte_row_bytes = 0;
+ }
+
+ if (dual_plane)
+ sw_bytes = sw_bytes_ub_l + sw_bytes_ub_c;
+ else
+ sw_bytes = sw_bytes_ub_l;
+
+ DTRACE("DLG: %s: sw_bytes_ub_l = %d", __func__, sw_bytes_ub_l);
+ DTRACE("DLG: %s: sw_bytes_ub_c = %d", __func__, sw_bytes_ub_c);
+ DTRACE("DLG: %s: sw_bytes = %d", __func__, sw_bytes);
+ DTRACE("DLG: %s: vm_bytes = %d", __func__, vm_bytes);
+ DTRACE("DLG: %s: meta_row_bytes = %d", __func__, meta_row_bytes);
+ DTRACE("DLG: %s: dpte_row_bytes = %d", __func__, dpte_row_bytes);
+
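+	/* prefetch_bw is the bandwidth (bytes per us) needed to move the meta ptes,
+	 * two rows each of pte and meta data, and the prefetch swaths within t_pre;
+	 * flip_bw is this plane's share of the total immediate flip bandwidth.
+	 */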
+ prefetch_bw = (vm_bytes + 2 * dpte_row_bytes + 2 * meta_row_bytes + sw_bytes) / t_pre_us;
+ flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param.total_flip_bw)
+ / (double) dlg_sys_param.total_flip_bytes;
+ t_vm_us = line_time_in_us / 4.0;
+ if (vm_en && dcc_en) {
+ t_vm_us = dml_max(
+ dlg_sys_param.t_extra_us,
+ dml_max((double) vm_bytes / prefetch_bw, t_vm_us));
+
+ if (iflip_en && !dual_plane) {
+ t_vm_us = dml_max(mode_lib->soc.urgent_latency_us, t_vm_us);
+ if (flip_bw > 0.)
+ t_vm_us = dml_max(vm_bytes / flip_bw, t_vm_us);
+ }
+ }
+
+ t_r0_us = dml_max(dlg_sys_param.t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
+
+ if (vm_en || dcc_en) {
+ t_r0_us = dml_max(
+ (double) (dpte_row_bytes + meta_row_bytes) / prefetch_bw,
+ dlg_sys_param.t_extra_us);
+ t_r0_us = dml_max((double) (line_time_in_us - t_vm_us), t_r0_us);
+
+ if (iflip_en && !dual_plane) {
+ t_r0_us = dml_max(mode_lib->soc.urgent_latency_us * 2.0, t_r0_us);
+ if (flip_bw > 0.)
+ t_r0_us = dml_max(
+ (dpte_row_bytes + meta_row_bytes) / flip_bw,
+ t_r0_us);
+ }
+ }
+
+ disp_dlg_regs->dst_y_after_scaler = dst_y_after_scaler; /* in terms of line */
+ disp_dlg_regs->refcyc_x_after_scaler = dst_x_after_scaler * ref_freq_to_pix_freq; /* in terms of refclk */
+ ASSERT(disp_dlg_regs->refcyc_x_after_scaler < (unsigned int) dml_pow(2, 13));
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->dst_y_after_scaler = 0x%0x",
+ __func__,
+ disp_dlg_regs->dst_y_after_scaler);
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->refcyc_x_after_scaler = 0x%0x",
+ __func__,
+ disp_dlg_regs->refcyc_x_after_scaler);
+
+ disp_dlg_regs->dst_y_prefetch = (unsigned int) (dst_y_prefetch * dml_pow(2, 2));
+ DTRACE(
+ "DLG: %s: disp_dlg_regs->dst_y_prefetch = %d",
+ __func__,
+ disp_dlg_regs->dst_y_prefetch);
+
+ dst_y_per_vm_vblank = 0.0;
+ dst_y_per_row_vblank = 0.0;
+
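+	/* Both values are rounded to quarter lines and programmed with 2
+	 * fractional bits, hence the * 2^2 below.
+	 */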
+ dst_y_per_vm_vblank = t_vm_us / line_time_in_us;
+ dst_y_per_vm_vblank = dml_floor(4.0 * (dst_y_per_vm_vblank + 0.125), 1) / 4.0;
+ disp_dlg_regs->dst_y_per_vm_vblank = (unsigned int) (dst_y_per_vm_vblank * dml_pow(2, 2));
+
+ dst_y_per_row_vblank = t_r0_us / line_time_in_us;
+ dst_y_per_row_vblank = dml_floor(4.0 * (dst_y_per_row_vblank + 0.125), 1) / 4.0;
+ disp_dlg_regs->dst_y_per_row_vblank = (unsigned int) (dst_y_per_row_vblank * dml_pow(2, 2));
+
+ DTRACE("DLG: %s: lsw_l = %d", __func__, lsw_l);
+ DTRACE("DLG: %s: lsw_c = %d", __func__, lsw_c);
+ DTRACE("DLG: %s: dpte_bytes_per_row_ub_l = %d", __func__, dpte_bytes_per_row_ub_l);
+ DTRACE("DLG: %s: dpte_bytes_per_row_ub_c = %d", __func__, dpte_bytes_per_row_ub_c);
+
+ DTRACE("DLG: %s: prefetch_bw = %3.2f", __func__, prefetch_bw);
+ DTRACE("DLG: %s: flip_bw = %3.2f", __func__, flip_bw);
+ DTRACE("DLG: %s: t_pre_us = %3.2f", __func__, t_pre_us);
+ DTRACE("DLG: %s: t_vm_us = %3.2f", __func__, t_vm_us);
+ DTRACE("DLG: %s: t_r0_us = %3.2f", __func__, t_r0_us);
+ DTRACE("DLG: %s: dst_y_per_vm_vblank = %3.2f", __func__, dst_y_per_vm_vblank);
+ DTRACE("DLG: %s: dst_y_per_row_vblank = %3.2f", __func__, dst_y_per_row_vblank);
+ DTRACE("DLG: %s: dst_y_prefetch = %3.2f", __func__, dst_y_prefetch);
+
+ min_dst_y_per_vm_vblank = 8.0;
+ min_dst_y_per_row_vblank = 16.0;
+ if (htotal <= 75) {
+ min_vblank = 300;
+ min_dst_y_per_vm_vblank = 100.0;
+ min_dst_y_per_row_vblank = 100.0;
+ }
+
+ ASSERT(dst_y_per_vm_vblank < min_dst_y_per_vm_vblank);
+ ASSERT(dst_y_per_row_vblank < min_dst_y_per_row_vblank);
+
+ ASSERT(dst_y_prefetch > (dst_y_per_vm_vblank + dst_y_per_row_vblank));
+ lsw = dst_y_prefetch - (dst_y_per_vm_vblank + dst_y_per_row_vblank);
+
+ DTRACE("DLG: %s: lsw = %3.2f", __func__, lsw);
+
+ vratio_pre_l = get_vratio_pre(
+ mode_lib,
+ max_num_sw_l,
+ max_partial_sw_l,
+ swath_height_l,
+ max_vinit_l,
+ lsw);
+ vratio_pre_c = 1.0;
+ if (dual_plane)
+ vratio_pre_c = get_vratio_pre(
+ mode_lib,
+ max_num_sw_c,
+ max_partial_sw_c,
+ swath_height_c,
+ max_vinit_c,
+ lsw);
+
+ DTRACE("DLG: %s: vratio_pre_l=%3.2f", __func__, vratio_pre_l);
+ DTRACE("DLG: %s: vratio_pre_c=%3.2f", __func__, vratio_pre_c);
+
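+	/* vratio_prefetch is programmed with 19 fractional bits; ratios of 4.0
+	 * or more are clamped to the register maximum of 2^21 - 1.
+	 */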
+ ASSERT(vratio_pre_l <= 4.0);
+ if (vratio_pre_l >= 4.0)
+ disp_dlg_regs->vratio_prefetch = (unsigned int) dml_pow(2, 21) - 1;
+ else
+ disp_dlg_regs->vratio_prefetch = (unsigned int) (vratio_pre_l * dml_pow(2, 19));
+
+ ASSERT(vratio_pre_c <= 4.0);
+ if (vratio_pre_c >= 4.0)
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) dml_pow(2, 21) - 1;
+ else
+ disp_dlg_regs->vratio_prefetch_c = (unsigned int) (vratio_pre_c * dml_pow(2, 19));
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_c);
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ (unsigned int) (dst_y_per_row_vblank * (double) htotal
+ * ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int) dml_pow(2, 13));
+
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;/* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
+
+ /* Active */
+ req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ swath_width_pixels_ub_l = 0;
+ swath_width_pixels_ub_c = 0;
+ scaler_rec_in_width_l = 0;
+ scaler_rec_in_width_c = 0;
+ dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_pte_row_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_pte_row_nom_c < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * dml_pow(2, 2));
+ ASSERT(disp_dlg_regs->dst_y_per_meta_row_nom_l < (unsigned int) dml_pow(2, 17));
+
+ disp_dlg_regs->dst_y_per_meta_row_nom_c = disp_dlg_regs->dst_y_per_meta_row_nom_l; /* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = (unsigned int) ((double) dpte_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_l = dml_pow(2, 23) - 1;
+
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = (unsigned int) ((double) dpte_row_height_c
+ / (double) vratio_c * (double) htotal * ref_freq_to_pix_freq
+ / (double) dpte_groups_per_row_ub_c);
+ if (disp_dlg_regs->refcyc_per_pte_group_nom_c >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_pte_group_nom_c = dml_pow(2, 23) - 1;
+
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = (unsigned int) ((double) meta_row_height_l
+ / (double) vratio_l * (double) htotal * ref_freq_to_pix_freq
+ / (double) meta_chunks_per_row_ub_l);
+ if (disp_dlg_regs->refcyc_per_meta_chunk_nom_l >= (unsigned int) dml_pow(2, 23))
+ disp_dlg_regs->refcyc_per_meta_chunk_nom_l = dml_pow(2, 23) - 1;
+
+ if (mode_422) {
+ swath_width_pixels_ub_l = swath_width_ub_l * 2; /* *2 for 2 pixel per element */
+ swath_width_pixels_ub_c = swath_width_ub_c * 2;
+ } else {
+ swath_width_pixels_ub_l = swath_width_ub_l * 1;
+ swath_width_pixels_ub_c = swath_width_ub_c * 1;
+ }
+
+ hscale_pixel_rate_l = 0.;
+ hscale_pixel_rate_c = 0.;
+ min_hratio_fact_l = 1.0;
+ min_hratio_fact_c = 1.0;
+
+ if (htaps_l <= 1)
+ min_hratio_fact_l = 2.0;
+ else if (htaps_l <= 6) {
+ if ((hratios_l * 2.0) > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratios_l * 2.0;
+ } else {
+ if (hratios_l > 4.0)
+ min_hratio_fact_l = 4.0;
+ else
+ min_hratio_fact_l = hratios_l;
+ }
+
+ hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
+
+ if (htaps_c <= 1)
+ min_hratio_fact_c = 2.0;
+ else if (htaps_c <= 6) {
+ if ((hratios_c * 2.0) > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratios_c * 2.0;
+ } else {
+ if (hratios_c > 4.0)
+ min_hratio_fact_c = 4.0;
+ else
+ min_hratio_fact_c = hratios_c;
+ }
+
+ hscale_pixel_rate_c = min_hratio_fact_c * dppclk_freq_in_mhz;
+
+ refcyc_per_line_delivery_pre_l = 0.;
+ refcyc_per_line_delivery_pre_c = 0.;
+ refcyc_per_line_delivery_l = 0.;
+ refcyc_per_line_delivery_c = 0.;
+
+ refcyc_per_req_delivery_pre_l = 0.;
+ refcyc_per_req_delivery_pre_c = 0.;
+ refcyc_per_req_delivery_l = 0.;
+ refcyc_per_req_delivery_c = 0.;
+ refcyc_per_req_delivery_pre_cur0 = 0.;
+ refcyc_per_req_delivery_cur0 = 0.;
+
+ full_recout_width = 0;
+ if (e2e_pipe_param.pipe.src.is_hsplit) {
+ if (e2e_pipe_param.pipe.dest.full_recout_width == 0) {
+			DTRACE("DLG: %s: Warning: full_recout_width not set in hsplit mode", __func__);
+ full_recout_width = e2e_pipe_param.pipe.dest.recout_width * 2; /* assume half split for dcn1 */
+ } else
+ full_recout_width = e2e_pipe_param.pipe.dest.full_recout_width;
+ } else
+ full_recout_width = e2e_pipe_param.pipe.dest.recout_width;
+
+ refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); /* per line */
+
+ refcyc_per_line_delivery_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_l,
+ hscale_pixel_rate_l,
+ swath_width_pixels_ub_l,
+ 1); /* per line */
+
+ DTRACE("DLG: %s: full_recout_width = %d", __func__, full_recout_width);
+ DTRACE("DLG: %s: hscale_pixel_rate_l = %3.2f", __func__, hscale_pixel_rate_l);
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_pre_l = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_pre_l);
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_l = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_l);
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_l = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_pre_l,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_l = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_l,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_l < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_l < (unsigned int) dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_line_delivery_pre_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); /* per line */
+
+ refcyc_per_line_delivery_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_c,
+ hscale_pixel_rate_c,
+ swath_width_pixels_ub_c,
+ 1); /* per line */
+
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_pre_c = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_pre_c);
+ DTRACE(
+ "DLG: %s: refcyc_per_line_delivery_c = %3.2f",
+ __func__,
+ refcyc_per_line_delivery_c);
+
+ disp_dlg_regs->refcyc_per_line_delivery_pre_c = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_pre_c,
+ 1);
+ disp_dlg_regs->refcyc_per_line_delivery_c = (unsigned int) dml_floor(
+ refcyc_per_line_delivery_c,
+ 1);
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_pre_c < (unsigned int) dml_pow(2, 13));
+ ASSERT(disp_dlg_regs->refcyc_per_line_delivery_c < (unsigned int) dml_pow(2, 13));
+ }
+ disp_dlg_regs->chunk_hdl_adjust_cur0 = 3;
+
+ /* TTU - Luma / Chroma */
+ if (access_dir) { /* vertical access */
+ scaler_rec_in_width_l = vp_height_l;
+ scaler_rec_in_width_c = vp_height_c;
+ } else {
+ scaler_rec_in_width_l = vp_width_l;
+ scaler_rec_in_width_c = vp_width_c;
+ }
+
+ refcyc_per_req_delivery_pre_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); /* per req */
+ refcyc_per_req_delivery_l = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_l,
+ hscale_pixel_rate_l,
+ scaler_rec_in_width_l,
+ req_per_swath_ub_l); /* per req */
+
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_pre_l = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_pre_l);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_l = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_l);
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_l = (unsigned int) (refcyc_per_req_delivery_pre_l
+ * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_l = (unsigned int) (refcyc_per_req_delivery_l
+ * dml_pow(2, 10));
+
+ ASSERT(refcyc_per_req_delivery_pre_l < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_l < dml_pow(2, 13));
+
+ if (dual_plane) {
+ refcyc_per_req_delivery_pre_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_pre_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); /* per req */
+ refcyc_per_req_delivery_c = get_refcyc_per_delivery(
+ mode_lib,
+ refclk_freq_in_mhz,
+ pclk_freq_in_mhz,
+ full_recout_width,
+ vratio_c,
+ hscale_pixel_rate_c,
+ scaler_rec_in_width_c,
+ req_per_swath_ub_c); /* per req */
+
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_pre_c = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_pre_c);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_c = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_c);
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_c =
+ (unsigned int) (refcyc_per_req_delivery_pre_c * dml_pow(2, 10));
+ disp_ttu_regs->refcyc_per_req_delivery_c = (unsigned int) (refcyc_per_req_delivery_c
+ * dml_pow(2, 10));
+
+ ASSERT(refcyc_per_req_delivery_pre_c < dml_pow(2, 13));
+ ASSERT(refcyc_per_req_delivery_c < dml_pow(2, 13));
+ }
+
+ /* TTU - Cursor */
+ hratios_cur0 = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
+ cur0_src_width = e2e_pipe_param.pipe.src.cur0_src_width; /* cursor source width */
+ cur0_bpp = (enum cursor_bpp) e2e_pipe_param.pipe.src.cur0_bpp;
+ cur0_req_size = 0;
+ cur0_req_width = 0;
+ cur0_width_ub = 0.0;
+ cur0_req_per_width = 0.0;
+ hactive_cur0 = 0.0;
+
+ ASSERT(cur0_src_width <= 256);
+
+ if (cur0_src_width > 0) {
+ unsigned int cur0_bit_per_pixel = 0;
+
+ if (cur0_bpp == dm_cur_2bit) {
+ cur0_req_size = 64; /* byte */
+ cur0_bit_per_pixel = 2;
+ } else { /* 32bit */
+ cur0_bit_per_pixel = 32;
+ if (cur0_src_width >= 1 && cur0_src_width <= 16)
+ cur0_req_size = 64;
+ else if (cur0_src_width >= 17 && cur0_src_width <= 31)
+ cur0_req_size = 128;
+ else
+ cur0_req_size = 256;
+ }
+
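+		/* For example, a 64 pixel wide 32bpp cursor uses 256 byte requests,
+		 * so cur0_req_width = 256 / 4 = 64 pixels, cur0_width_ub = 64 and
+		 * cur0_req_per_width = 1.
+		 */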
+ cur0_req_width = (double) cur0_req_size / ((double) cur0_bit_per_pixel / 8.0);
+ cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1)
+ * (double) cur0_req_width;
+ cur0_req_per_width = cur0_width_ub / (double) cur0_req_width;
+ hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */
+
+ if (vratio_pre_l <= 1.0) {
+ refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq
+ / (double) cur0_req_per_width;
+ } else {
+ refcyc_per_req_delivery_pre_cur0 = (double) refclk_freq_in_mhz
+ * (double) cur0_src_width / hscale_pixel_rate_l
+ / (double) cur0_req_per_width;
+ }
+
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_pre_cur0 * dml_pow(2, 10));
+ ASSERT(refcyc_per_req_delivery_pre_cur0 < dml_pow(2, 13));
+
+ if (vratio_l <= 1.0) {
+ refcyc_per_req_delivery_cur0 = hactive_cur0 * ref_freq_to_pix_freq
+ / (double) cur0_req_per_width;
+ } else {
+ refcyc_per_req_delivery_cur0 = (double) refclk_freq_in_mhz
+ * (double) cur0_src_width / hscale_pixel_rate_l
+ / (double) cur0_req_per_width;
+ }
+
+ DTRACE("DLG: %s: cur0_req_width = %d", __func__, cur0_req_width);
+ DTRACE(
+ "DLG: %s: cur0_width_ub = %3.2f",
+ __func__,
+ cur0_width_ub);
+ DTRACE(
+ "DLG: %s: cur0_req_per_width = %3.2f",
+ __func__,
+ cur0_req_per_width);
+ DTRACE(
+ "DLG: %s: hactive_cur0 = %3.2f",
+ __func__,
+ hactive_cur0);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_pre_cur0 = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_pre_cur0);
+ DTRACE(
+ "DLG: %s: refcyc_per_req_delivery_cur0 = %3.2f",
+ __func__,
+ refcyc_per_req_delivery_cur0);
+
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 =
+ (unsigned int) (refcyc_per_req_delivery_cur0 * dml_pow(2, 10));
+ ASSERT(refcyc_per_req_delivery_cur0 < dml_pow(2, 13));
+ } else {
+ disp_ttu_regs->refcyc_per_req_delivery_pre_cur0 = 0;
+ disp_ttu_regs->refcyc_per_req_delivery_cur0 = 0;
+ }
+
+ /* TTU - Misc */
+ disp_ttu_regs->qos_level_low_wm = 0;
+ ASSERT(disp_ttu_regs->qos_level_low_wm < dml_pow(2, 14));
+ disp_ttu_regs->qos_level_high_wm = (unsigned int) (4.0 * (double) htotal
+ * ref_freq_to_pix_freq);
+ ASSERT(disp_ttu_regs->qos_level_high_wm < dml_pow(2, 14));
+
+ disp_ttu_regs->qos_level_flip = 14;
+ disp_ttu_regs->qos_level_fixed_l = 8;
+ disp_ttu_regs->qos_level_fixed_c = 8;
+ disp_ttu_regs->qos_level_fixed_cur0 = 8;
+ disp_ttu_regs->qos_ramp_disable_l = 0;
+ disp_ttu_regs->qos_ramp_disable_c = 0;
+ disp_ttu_regs->qos_ramp_disable_cur0 = 0;
+
+ disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
+ ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
+
+ print__ttu_regs_st(mode_lib, *disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
new file mode 100644
index 000000000000..987d7671cd0f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DISPLAY_RQ_DLG_CALC_H__
+#define __DISPLAY_RQ_DLG_CALC_H__
+
+#include "dml_common_defs.h"
+#include "display_rq_dlg_helpers.h"
+
+struct display_mode_lib;
+
+void dml1_extract_rq_regs(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ const struct _vcs_dpi_display_rq_params_st rq_param);
+/* Function: dml1_rq_dlg_get_rq_params
+ * Calculate requestor-related parameters that are register-definition agnostic
+ * (i.e. this layer tries to separate real values from the register definition)
+ * Input:
+ * pipe_src_param - pipe source configuration (e.g. vp, pitch, etc.)
+ * Output:
+ * rq_param - values that can be used to setup RQ (e.g. swath_height, plane1_addr, etc.)
+ */
+void dml1_rq_dlg_get_rq_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_rq_params_st *rq_param,
+ const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param);
+
+
+/* Function: dml1_rq_dlg_get_dlg_params
+ * Calculate deadline-related parameters
+ */
+void dml1_rq_dlg_get_dlg_params(
+ struct display_mode_lib *mode_lib,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const bool cstate_en,
+ const bool pstate_en,
+ const bool vm_en,
+ const bool iflip_en);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
new file mode 100644
index 000000000000..b953b02a1512
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dml_common_defs.h"
+#include "../calcs/dcn_calc_math.h"
+
+#include "dml_inline_defs.h"
+
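+/* Round to the nearest integer, halves rounding up:
+ * e.g. dml_round(2.4) returns 2.0 and dml_round(2.5) returns 3.0.
+ */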
+double dml_round(double a)
+{
+ double round_pt = 0.5;
+ double ceil = dml_ceil(a, 1);
+ double floor = dml_floor(a, 1);
+
+ if (a - floor >= round_pt)
+ return ceil;
+ else
+ return floor;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
new file mode 100644
index 000000000000..b2847bc469fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_COMMON_DEFS_H__
+#define __DC_COMMON_DEFS_H__
+
+#include "dm_services.h"
+#include "dc_features.h"
+#include "display_mode_structs.h"
+#include "display_mode_enums.h"
+
+#define dml_print(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
+#define DTRACE(str, ...) {dm_logger_write(mode_lib->logger, LOG_DML, str, ##__VA_ARGS__); }
+
+double dml_round(double a);
+
+#endif /* __DC_COMMON_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
new file mode 100644
index 000000000000..e68086b8a22f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DML_INLINE_DEFS_H__
+#define __DML_INLINE_DEFS_H__
+
+#include "dml_common_defs.h"
+#include "../calcs/dcn_calc_math.h"
+
+static inline double dml_min(double a, double b)
+{
+ return (double) dcn_bw_min2(a, b);
+}
+
+static inline double dml_max(double a, double b)
+{
+ return (double) dcn_bw_max2(a, b);
+}
+
+static inline double dml_max3(double a, double b, double c)
+{
+ return dml_max(dml_max(a, b), c);
+}
+
+static inline double dml_max4(double a, double b, double c, double d)
+{
+ return dml_max(dml_max(a, b), dml_max(c, d));
+}
+
+static inline double dml_max5(double a, double b, double c, double d, double e)
+{
+ return dml_max(dml_max4(a, b, c, d), e);
+}
+
+static inline double dml_ceil(double a, double granularity)
+{
+ return (double) dcn_bw_ceil2(a, granularity);
+}
+
+static inline double dml_floor(double a, double granularity)
+{
+ return (double) dcn_bw_floor2(a, granularity);
+}
+
+static inline int dml_log2(double x)
+{
+ return dml_round((double)dcn_bw_log(x, 2));
+}
+
+static inline double dml_pow(double a, int exp)
+{
+ return (double) dcn_bw_pow(a, exp);
+}
+
+static inline double dml_fmod(double f, int val)
+{
+ return (double) dcn_bw_mod(f, val);
+}
+
+static inline double dml_ceil_2(double f)
+{
+ return (double) dcn_bw_ceil2(f, 2);
+}
+
+static inline double dml_ceil_ex(double x, double granularity)
+{
+ return (double) dcn_bw_ceil2(x, granularity);
+}
+
+static inline double dml_floor_ex(double x, double granularity)
+{
+ return (double) dcn_bw_floor2(x, granularity);
+}
+
+static inline double dml_log(double x, double base)
+{
+ return (double) dcn_bw_log(x, base);
+}
+
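+/* Round num to a multiple of 'multiple', either up or down:
+ * e.g. dml_round_to_multiple(100, 64, true) == 128 and
+ * dml_round_to_multiple(100, 64, false) == 64.
+ */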
+static inline unsigned int dml_round_to_multiple(unsigned int num,
+ unsigned int multiple,
+ bool up)
+{
+ unsigned int remainder;
+
+ if (multiple == 0)
+ return num;
+
+ remainder = num % multiple;
+
+ if (remainder == 0)
+ return num;
+
+ if (up)
+ return (num + multiple - remainder);
+ else
+ return (num - remainder);
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
new file mode 100644
index 000000000000..bc7d8c707221
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "soc_bounding_box.h"
+#include "display_mode_lib.h"
+#include "dc_features.h"
+
+#include "dml_inline_defs.h"
+void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box)
+{
+ to_box->dram_clock_change_latency_us = from_box->dram_clock_change_latency_us;
+ to_box->sr_exit_time_us = from_box->sr_exit_time_us;
+ to_box->sr_enter_plus_exit_time_us = from_box->sr_enter_plus_exit_time_us;
+ to_box->urgent_latency_us = from_box->urgent_latency_us;
+ to_box->writeback_latency_us = from_box->writeback_latency_us;
+}
+
+voltage_scaling_st dml_socbb_voltage_scaling(
+ const soc_bounding_box_st *soc,
+ enum voltage_state voltage)
+{
+ const voltage_scaling_st *voltage_state;
+ const voltage_scaling_st * const voltage_end = soc->clock_limits + DC__VOLTAGE_STATES;
+
+ for (voltage_state = soc->clock_limits;
+ voltage_state < voltage_end && voltage_state->state != voltage;
+ voltage_state++) {
+ }
+
+ if (voltage_state < voltage_end)
+ return *voltage_state;
+ return soc->clock_limits[DC__VOLTAGE_STATES - 1];
+}
+
+double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage)
+{
+ double return_bw;
+
+ voltage_scaling_st state = dml_socbb_voltage_scaling(box, voltage);
+
+ return_bw = dml_min((double) box->return_bus_width_bytes * state.dcfclk_mhz,
+ state.dram_bw_per_chan_gbps * 1000.0 * (double) box->num_chans
+ * box->ideal_dram_bw_after_urgent_percent / 100.0);
+
+ return_bw = dml_min((double) box->return_bus_width_bytes * state.fabricclk_mhz, return_bw);
+
+ return return_bw;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
new file mode 100644
index 000000000000..7a65206a6d21
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/soc_bounding_box.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __SOC_BOUNDING_BOX_H__
+#define __SOC_BOUNDING_BOX_H__
+
+#include "dml_common_defs.h"
+
+void dml_socbb_set_latencies(soc_bounding_box_st *to_box, soc_bounding_box_st *from_box);
+voltage_scaling_st dml_socbb_voltage_scaling(const soc_bounding_box_st *box, enum voltage_state voltage);
+double dml_socbb_return_bw_mhz(soc_bounding_box_st *box, enum voltage_state voltage);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
new file mode 100644
index 000000000000..70d01a9e9676
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -0,0 +1,58 @@
+#
+# Makefile for the 'gpio' sub-component of DAL.
+# It provides the control and status of HW GPIO pins.
+
+GPIO = gpio_base.o gpio_service.o hw_factory.o \
+ hw_gpio.o hw_hpd.o hw_ddc.o hw_translate.o
+
+AMD_DAL_GPIO = $(addprefix $(AMDDALPATH)/dc/gpio/,$(GPIO))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+# all DCE8.x are derived from DCE8.0
+GPIO_DCE80 = hw_translate_dce80.o hw_factory_dce80.o
+
+AMD_DAL_GPIO_DCE80 = $(addprefix $(AMDDALPATH)/dc/gpio/dce80/,$(GPIO_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE80)
+
+###############################################################################
+# DCE 11x
+###############################################################################
+GPIO_DCE110 = hw_translate_dce110.o hw_factory_dce110.o
+
+AMD_DAL_GPIO_DCE110 = $(addprefix $(AMDDALPATH)/dc/gpio/dce110/,$(GPIO_DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE110)
+
+###############################################################################
+# DCE 12x
+###############################################################################
+GPIO_DCE120 = hw_translate_dce120.o hw_factory_dce120.o
+
+AMD_DAL_GPIO_DCE120 = $(addprefix $(AMDDALPATH)/dc/gpio/dce120/,$(GPIO_DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
+
+###############################################################################
+# DCN 1x
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
+
+AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN10)
+endif
+
+###############################################################################
+# Diagnostics on FPGA
+###############################################################################
+GPIO_DIAG_FPGA = hw_translate_diag.o hw_factory_diag.o
+
+AMD_DAL_GPIO_DIAG_FPGA = $(addprefix $(AMDDALPATH)/dc/gpio/diagnostics/,$(GPIO_DIAG_FPGA))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DIAG_FPGA)
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
new file mode 100644
index 000000000000..20d81bca119c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+#include "hw_factory_dce110.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define REG(reg_name)\
+ mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ mm ## block ## id ## _ ## reg_name
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+
+/*
+ * dal_hw_factory_dce110_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dce110_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h
new file mode 100644
index 000000000000..ecf06ed0d587
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE110_H__
+#define __DAL_HW_FACTORY_DCE110_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dce110_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c
new file mode 100644
index 000000000000..ac4cddbba815
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "hw_translate_dce110.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case mmDC_GPIO_GENERIC_A:
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case mmDC_GPIO_HPD_A:
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case mmDC_GPIO_SYNCA_A:
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* mmDC_GPIO_GENLK_MASK */
+ case mmDC_GPIO_GENLK_A:
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+ /* We do not report a GPIO_ID for DDC:
+ * DdcHandle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case mmDC_GPIO_DDC1_A:
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case mmDC_GPIO_DDC2_A:
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case mmDC_GPIO_DDC3_A:
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case mmDC_GPIO_DDC4_A:
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case mmDC_GPIO_DDC5_A:
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case mmDC_GPIO_DDC6_A:
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case mmDC_GPIO_DDCVGA_A:
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case mmDC_GPIO_I2CPAD_A:
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case mmDC_GPIO_PWRSEQ_A:
+ case mmDC_GPIO_PAD_STRENGTH_1:
+ case mmDC_GPIO_PAD_STRENGTH_2:
+ case mmDC_GPIO_DEBUG:
+ return false;
+ /* UNEXPECTED */
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = mmDC_GPIO_GENERIC_A;
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = mmDC_GPIO_HPD_A;
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dce110_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dce110_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
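offset_to_id() and id_to_offset() are inverse lookups over the same register map: one resolves an (id, en) pair to the DC_GPIO_*_A address plus bit mask, the other maps a raw (offset, mask) pair back to a pin. The common tail of id_to_offset() then derives the companion Y/EN/MASK register addresses purely from the A register's position. A standalone sketch of that derivation, with an invented address standing in for the mm* constant:

#include <stdint.h>
#include <stdio.h>

struct gpio_pin_info {
	uint32_t offset, offset_y, offset_en, offset_mask;
	uint32_t mask, mask_y, mask_en, mask_mask;
};

/* Mirrors the tail of id_to_offset(): the Y/EN/MASK registers sit at fixed
 * distances from the *_A register, and all four share the same bit mask. */
static void fill_companion_regs(struct gpio_pin_info *info)
{
	info->offset_y    = info->offset + 2;
	info->offset_en   = info->offset + 1;
	info->offset_mask = info->offset - 1;

	info->mask_y    = info->mask;
	info->mask_en   = info->mask;
	info->mask_mask = info->mask;
}

int main(void)
{
	/* invented address for illustration; the driver uses mmDC_GPIO_HPD_A */
	struct gpio_pin_info hpd1 = { .offset = 0x1898, .mask = 1u << 0 };

	fill_companion_regs(&hpd1);
	printf("A=0x%x Y=0x%x EN=0x%x MASK=0x%x\n",
	       hpd1.offset, hpd1.offset_y, hpd1.offset_en, hpd1.offset_mask);
	return 0;
}

All four registers in a bank act on the same per-pin bit, which is why the single mask is copied into mask_y/mask_en/mask_mask.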
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h
new file mode 100644
index 000000000000..4d16e09853c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE110_H__
+#define __DAL_HW_TRANSLATE_DCE110_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dce110_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
new file mode 100644
index 000000000000..4ced9a7d63dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "hw_factory_dce120.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#define block HPD
+#define reg_num 0
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+/* function table */
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+/*
+ * dal_hw_factory_dce120_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dce120_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h
new file mode 100644
index 000000000000..db260c351f73
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE120_H__
+#define __DAL_HW_FACTORY_DCE120_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dce120_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
new file mode 100644
index 000000000000..af3843a69652
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "hw_translate_dce120.h"
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expand register list macro defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* macros to expand register list macro defined in HW object header file
+ * end *********************/
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case REG(DC_GPIO_GENERIC_A):
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case REG(DC_GPIO_HPD_A):
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case REG(DC_GPIO_SYNCA_A):
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* REG(DC_GPIO_GENLK_MASK) */
+ case REG(DC_GPIO_GENLK_A):
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+ /* We do not report a GPIO_ID for DDC:
+ * DdcHandle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case REG(DC_GPIO_DDC1_A):
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case REG(DC_GPIO_DDC2_A):
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case REG(DC_GPIO_DDC3_A):
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case REG(DC_GPIO_DDC4_A):
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case REG(DC_GPIO_DDC5_A):
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case REG(DC_GPIO_DDC6_A):
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case REG(DC_GPIO_DDCVGA_A):
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case REG(DC_GPIO_I2CPAD_A):
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case REG(DC_GPIO_PWRSEQ_A):
+ case REG(DC_GPIO_PAD_STRENGTH_1):
+ case REG(DC_GPIO_PAD_STRENGTH_2):
+ case REG(DC_GPIO_DEBUG):
+ return false;
+ /* UNEXPECTED */
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = REG(DC_GPIO_GENERIC_A);
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = REG(DC_GPIO_HPD_A);
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dce120_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dce120_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
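Where the DCE 11.0 translate above matches absolute mm* register addresses, the DCE 12.0 (soc15) variant computes each address at compile time: REG(reg_name) token-pastes the name into both its _BASE_IDX segment selector and its offset, and the BASE/BASE_INNER indirection forces the segment index to expand before the final paste. A standalone sketch of the same two-level macro trick, with invented segment bases and offsets standing in for the dce_12_0_offset.h values:

#include <stdio.h>

/* Invented stand-ins for the soc15 per-instance segment bases. */
#define DCE_BASE__INST0_SEG0 0x00000000
#define DCE_BASE__INST0_SEG1 0x000000C0
#define DCE_BASE__INST0_SEG2 0x000034C0

/* Invented stand-ins for one register's offset and segment index. */
#define mmDC_GPIO_HPD_A          0x12A1
#define mmDC_GPIO_HPD_A_BASE_IDX 2

/* Same expansion scheme as the driver's REG() macro. */
#define BASE_INNER(seg) DCE_BASE__INST0_SEG ## seg
#define BASE(seg)       BASE_INNER(seg)
#define REG(reg_name) \
	(BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name)

int main(void)
{
	/* REG(DC_GPIO_HPD_A) -> DCE_BASE__INST0_SEG2 + mmDC_GPIO_HPD_A */
	printf("DC_GPIO_HPD_A lives at 0x%x\n", REG(DC_GPIO_HPD_A));
	return 0;
}

Without the extra BASE_INNER() level, ## would paste the literal _BASE_IDX token name instead of its numeric value; that pre-expansion step is the whole point of the indirection.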
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h
new file mode 100644
index 000000000000..c21766894af3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE120_H__
+#define __DAL_HW_TRANSLATE_DCE120_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dce120_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
new file mode 100644
index 000000000000..48b67866377e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+#include "hw_factory_dce80.h"
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#define REG(reg_name)\
+ mm ## reg_name
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define HPD_REG_LIST_DCE8(id) \
+ HPD_GPIO_REG_LIST(id), \
+ .int_status = mmDC_HPD ## id ## _INT_STATUS,\
+ .toggle_filt_cntl = mmDC_HPD ## id ## _TOGGLE_FILT_CNTL
+
+#define HPD_MASK_SH_LIST_DCE8(mask_sh) \
+ .DC_HPD_SENSE_DELAYED = DC_HPD1_INT_STATUS__DC_HPD1_SENSE_DELAYED ## mask_sh,\
+ .DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE ## mask_sh,\
+ .DC_HPD_CONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_CONNECT_INT_DELAY ## mask_sh,\
+ .DC_HPD_DISCONNECT_INT_DELAY = DC_HPD1_TOGGLE_FILT_CNTL__DC_HPD1_DISCONNECT_INT_DELAY ## mask_sh
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST_DCE8(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5),
+ hpd_regs(6)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST_DCE8(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST_DCE8(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+
+void dal_hw_factory_dce80_init(
+ struct hw_factory *factory)
+{
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
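The *_MASK_SH_LIST macros above use one more preprocessor idiom worth calling out: a single designated-initializer list is instantiated twice, once with a __SHIFT suffix and once with _MASK, to build the matching shift and mask tables from the same field names. A standalone sketch of that idiom, with invented register field constants standing in for dce_8_0_sh_mask.h:

#include <stdint.h>
#include <stdio.h>

/* Invented field constants; the real ones come from dce_8_0_sh_mask.h. */
#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE__SHIFT 0x1
#define DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK   0x2

struct hpd_sh_mask {
	uint32_t DC_HPD_SENSE;
};

/* One list macro, instantiated twice: the __SHIFT suffix builds the shift
 * table, the _MASK suffix builds the mask table, as the driver does. */
#define HPD_MASK_SH_LIST_DCE8(mask_sh) \
	.DC_HPD_SENSE = DC_HPD1_INT_STATUS__DC_HPD1_SENSE ## mask_sh

static const struct hpd_sh_mask hpd_shift = { HPD_MASK_SH_LIST_DCE8(__SHIFT) };
static const struct hpd_sh_mask hpd_mask  = { HPD_MASK_SH_LIST_DCE8(_MASK) };

int main(void)
{
	printf("shift=%u mask=0x%x\n",
	       hpd_shift.DC_HPD_SENSE, hpd_mask.DC_HPD_SENSE);
	return 0;
}

The resulting hpd_shift and hpd_mask tables travel together into define_hpd_registers(), so each field access can apply the matching shift and mask pair.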
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h
new file mode 100644
index 000000000000..e78a8b36f35a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCE80_H__
+#define __DAL_HW_FACTORY_DCE80_H__
+
+void dal_hw_factory_dce80_init(
+ struct hw_factory *factory);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c
new file mode 100644
index 000000000000..fabb9da504be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "hw_translate_dce80.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+#include "smu/smu_7_0_1_d.h"
+
+/*
+ * @brief
+ * Returns index of first bit (starting with LSB) which is set
+ */
+static uint32_t index_from_vector(
+ uint32_t vector)
+{
+ uint32_t result = 0;
+ uint32_t mask = 1;
+
+ do {
+ if (vector == mask)
+ return result;
+
+ ++result;
+ mask <<= 1;
+ } while (mask);
+
+ BREAK_TO_DEBUGGER();
+
+ return GPIO_ENUM_UNKNOWN;
+}
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case mmDC_GPIO_GENERIC_A:
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* HPD */
+ case mmDC_GPIO_HPD_A:
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case mmDC_GPIO_SYNCA_A:
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* mmDC_GPIO_GENLK_MASK */
+ case mmDC_GPIO_GENLK_A:
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ break;
+ /* GPIOPAD */
+ case mmGPIOPAD_A:
+ *id = GPIO_ID_GPIO_PAD;
+ *en = index_from_vector(mask);
+ return (*en <= GPIO_GPIO_PAD_MAX);
+ /* DDC */
+ /* We do not report a GPIO_ID for DDC:
+ * DdcHandle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in the create method */
+ case mmDC_GPIO_DDC1_A:
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case mmDC_GPIO_DDC2_A:
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case mmDC_GPIO_DDC3_A:
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case mmDC_GPIO_DDC4_A:
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case mmDC_GPIO_DDC5_A:
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case mmDC_GPIO_DDC6_A:
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case mmDC_GPIO_DDCVGA_A:
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case mmDC_GPIO_I2CPAD_A:
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case mmDC_GPIO_PWRSEQ_A:
+ case mmDC_GPIO_PAD_STRENGTH_1:
+ case mmDC_GPIO_PAD_STRENGTH_2:
+ case mmDC_GPIO_DEBUG:
+ return false;
+ /* UNEXPECTED */
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = mmDC_GPIO_DDC1_A;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = mmDC_GPIO_DDC2_A;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = mmDC_GPIO_DDC3_A;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = mmDC_GPIO_DDC4_A;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = mmDC_GPIO_DDC5_A;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = mmDC_GPIO_DDC6_A;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = mmDC_GPIO_DDCVGA_A;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = mmDC_GPIO_I2CPAD_A;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = mmDC_GPIO_GENERIC_A;
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = mmDC_GPIO_HPD_A;
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = mmDC_GPIO_SYNCA_A;
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = mmDC_GPIO_GENLK_A;
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+ break;
+ case GPIO_ID_GPIO_PAD:
+ info->offset = mmGPIOPAD_A;
+ info->mask = (1 << en);
+ result = (info->mask <= GPIO_GPIO_PAD_MAX);
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ BREAK_TO_DEBUGGER();
+ result = false;
+ }
+
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+void dal_hw_translate_dce80_init(
+ struct hw_translate *translate)
+{
+ translate->funcs = &funcs;
+}
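Among the translate files here, the DCE 8.0 one also handles the GPIOPAD block, and it does so numerically rather than with a switch: offset_to_id() converts a single-bit mask to a pad index via index_from_vector(), while id_to_offset() goes the other way with (1 << en). A standalone round-trip sketch of that conversion, reusing the driver's loop and an invented bound in place of GPIO_GPIO_PAD_MAX:

#include <stdint.h>
#include <stdio.h>

#define GPIO_GPIO_PAD_MAX 28u /* invented; the real value comes from gpio_types.h */

/* Same logic as the driver's index_from_vector(): position of the only set bit. */
static uint32_t index_from_vector(uint32_t vector)
{
	uint32_t result = 0;
	uint32_t mask = 1;

	do {
		if (vector == mask)
			return result;

		++result;
		mask <<= 1;
	} while (mask);

	return UINT32_MAX; /* not a single-bit vector */
}

int main(void)
{
	uint32_t en = 5;                         /* pad index */
	uint32_t mask = 1u << en;                /* id_to_offset direction */
	uint32_t back = index_from_vector(mask); /* offset_to_id direction */

	printf("en=%u -> mask=0x%x -> en=%u (valid=%d)\n",
	       en, mask, back, back <= GPIO_GPIO_PAD_MAX);
	return 0;
}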
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h
new file mode 100644
index 000000000000..374f2f3282a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCE80_H__
+#define __DAL_HW_TRANSLATE_DCE80_H__
+
+void dal_hw_translate_dce80_init(
+ struct hw_translate *tr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
new file mode 100644
index 000000000000..409763c70ce5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+#include "hw_factory_dcn10.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#define block HPD
+#define reg_num 0
+
+/* set field name */
+#define SF_HPD(reg_name, field_name, post_fix)\
+ .field_name = HPD0_ ## reg_name ## __ ## field_name ## post_fix
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#include "reg_helper.h"
+#include "../hpd_regs.h"
+
+#define hpd_regs(id) \
+{\
+ HPD_REG_LIST(id)\
+}
+
+static const struct hpd_registers hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+ hpd_regs(2),
+ hpd_regs(3),
+ hpd_regs(4),
+ hpd_regs(5)
+};
+
+static const struct hpd_sh_mask hpd_shift = {
+ HPD_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct hpd_sh_mask hpd_mask = {
+ HPD_MASK_SH_LIST(_MASK)
+};
+
+#include "../ddc_regs.h"
+
+ /* set field name */
+#define SF_DDC(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+static const struct ddc_registers ddc_data_regs[] = {
+ ddc_data_regs(1),
+ ddc_data_regs(2),
+ ddc_data_regs(3),
+ ddc_data_regs(4),
+ ddc_data_regs(5),
+ ddc_data_regs(6),
+ ddc_vga_data_regs,
+ ddc_i2c_data_regs
+};
+
+static const struct ddc_registers ddc_clk_regs[] = {
+ ddc_clk_regs(1),
+ ddc_clk_regs(2),
+ ddc_clk_regs(3),
+ ddc_clk_regs(4),
+ ddc_clk_regs(5),
+ ddc_clk_regs(6),
+ ddc_vga_clk_regs,
+ ddc_i2c_clk_regs
+};
+
+static const struct ddc_sh_mask ddc_shift = {
+ DDC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct ddc_sh_mask ddc_mask = {
+ DDC_MASK_SH_LIST(_MASK)
+};
+
+static void define_ddc_registers(
+ struct hw_gpio_pin *pin,
+ uint32_t en)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(pin);
+
+ switch (pin->id) {
+ case GPIO_ID_DDC_DATA:
+ ddc->regs = &ddc_data_regs[en];
+ ddc->base.regs = &ddc_data_regs[en].gpio;
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ ddc->regs = &ddc_clk_regs[en];
+ ddc->base.regs = &ddc_clk_regs[en].gpio;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ ddc->shifts = &ddc_shift;
+ ddc->masks = &ddc_mask;
+}
+
+static void define_hpd_registers(struct hw_gpio_pin *pin, uint32_t en)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(pin);
+
+ hpd->regs = &hpd_regs[en];
+ hpd->shifts = &hpd_shift;
+ hpd->masks = &hpd_mask;
+ hpd->base.regs = &hpd_regs[en].gpio;
+}
+
+/* function table */
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = dal_hw_ddc_create,
+ .create_ddc_clock = dal_hw_ddc_create,
+ .create_generic = NULL,
+ .create_hpd = dal_hw_hpd_create,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+ .define_hpd_registers = define_hpd_registers,
+ .define_ddc_registers = define_ddc_registers
+};
+/*
+ * dal_hw_factory_dcn10_init
+ *
+ * @brief
+ * Initialize HW factory function pointers and pin info
+ *
+ * @param
+ * struct hw_factory *factory - [out] struct of function pointers
+ */
+void dal_hw_factory_dcn10_init(struct hw_factory *factory)
+{
+ /*TODO check ASIC CAPs*/
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h
new file mode 100644
index 000000000000..2cc7a585b1f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DCN10_H__
+#define __DAL_HW_FACTORY_DCN10_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_dcn10_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
new file mode 100644
index 000000000000..64a6915b846b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "hw_translate_dcn10.h"
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_translate.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile-time expansion of the base address */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define REG(reg_name)\
+ BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
+
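+/* same as REG(), but for a register instance that belongs to a block
+ * (e.g. HPD<id>) */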
+#define REGI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+/* macros to expand register list macros defined in HW object header file
+ * end *********************/
+
+static bool offset_to_id(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en)
+{
+ switch (offset) {
+ /* GENERIC */
+ case REG(DC_GPIO_GENERIC_A):
+ *id = GPIO_ID_GENERIC;
+ switch (mask) {
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK:
+ *en = GPIO_GENERIC_A;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK:
+ *en = GPIO_GENERIC_B;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK:
+ *en = GPIO_GENERIC_C;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK:
+ *en = GPIO_GENERIC_D;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK:
+ *en = GPIO_GENERIC_E;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK:
+ *en = GPIO_GENERIC_F;
+ return true;
+ case DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK:
+ *en = GPIO_GENERIC_G;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* HPD */
+ case REG(DC_GPIO_HPD_A):
+ *id = GPIO_ID_HPD;
+ switch (mask) {
+ case DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK:
+ *en = GPIO_HPD_1;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK:
+ *en = GPIO_HPD_2;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK:
+ *en = GPIO_HPD_3;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK:
+ *en = GPIO_HPD_4;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK:
+ *en = GPIO_HPD_5;
+ return true;
+ case DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK:
+ *en = GPIO_HPD_6;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* SYNCA */
+ case REG(DC_GPIO_SYNCA_A):
+ *id = GPIO_ID_SYNC;
+ switch (mask) {
+ case DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK:
+ *en = GPIO_SYNC_HSYNC_A;
+ return true;
+ case DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK:
+ *en = GPIO_SYNC_VSYNC_A;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* GENLK (GSL) */
+ case REG(DC_GPIO_GENLK_A):
+ *id = GPIO_ID_GSL;
+ switch (mask) {
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK:
+ *en = GPIO_GSL_GENLOCK_CLOCK;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK:
+ *en = GPIO_GSL_GENLOCK_VSYNC;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_A;
+ return true;
+ case DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK:
+ *en = GPIO_GSL_SWAPLOCK_B;
+ return true;
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+ break;
+ /* DDC */
+ /* the GPIO_ID does not matter for the DDC lines:
+ * DdcHandle uses GPIO_ID_DDC_DATA/GPIO_ID_DDC_CLOCK
+ * directly in its create method */
+ case REG(DC_GPIO_DDC1_A):
+ *en = GPIO_DDC_LINE_DDC1;
+ return true;
+ case REG(DC_GPIO_DDC2_A):
+ *en = GPIO_DDC_LINE_DDC2;
+ return true;
+ case REG(DC_GPIO_DDC3_A):
+ *en = GPIO_DDC_LINE_DDC3;
+ return true;
+ case REG(DC_GPIO_DDC4_A):
+ *en = GPIO_DDC_LINE_DDC4;
+ return true;
+ case REG(DC_GPIO_DDC5_A):
+ *en = GPIO_DDC_LINE_DDC5;
+ return true;
+ case REG(DC_GPIO_DDC6_A):
+ *en = GPIO_DDC_LINE_DDC6;
+ return true;
+ case REG(DC_GPIO_DDCVGA_A):
+ *en = GPIO_DDC_LINE_DDC_VGA;
+ return true;
+ /* GPIO_I2CPAD */
+ case REG(DC_GPIO_I2CPAD_A):
+ *en = GPIO_DDC_LINE_I2C_PAD;
+ return true;
+ /* Not implemented */
+ case REG(DC_GPIO_PWRSEQ_A):
+ case REG(DC_GPIO_PAD_STRENGTH_1):
+ case REG(DC_GPIO_PAD_STRENGTH_2):
+ case REG(DC_GPIO_DEBUG):
+ return false;
+ /* UNEXPECTED */
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+static bool id_to_offset(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info)
+{
+ bool result = true;
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6DATA_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ info->mask = DC_GPIO_DDC6_A__DC_GPIO_DDC6CLK_A_MASK;
+ switch (en) {
+ case GPIO_DDC_LINE_DDC1:
+ info->offset = REG(DC_GPIO_DDC1_A);
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ info->offset = REG(DC_GPIO_DDC2_A);
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ info->offset = REG(DC_GPIO_DDC3_A);
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ info->offset = REG(DC_GPIO_DDC4_A);
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ info->offset = REG(DC_GPIO_DDC5_A);
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ info->offset = REG(DC_GPIO_DDC6_A);
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ info->offset = REG(DC_GPIO_DDCVGA_A);
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ info->offset = REG(DC_GPIO_I2CPAD_A);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GENERIC:
+ info->offset = REG(DC_GPIO_GENERIC_A);
+ switch (en) {
+ case GPIO_GENERIC_A:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICA_A_MASK;
+ break;
+ case GPIO_GENERIC_B:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICB_A_MASK;
+ break;
+ case GPIO_GENERIC_C:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICC_A_MASK;
+ break;
+ case GPIO_GENERIC_D:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICD_A_MASK;
+ break;
+ case GPIO_GENERIC_E:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICE_A_MASK;
+ break;
+ case GPIO_GENERIC_F:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICF_A_MASK;
+ break;
+ case GPIO_GENERIC_G:
+ info->mask = DC_GPIO_GENERIC_A__DC_GPIO_GENERICG_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_HPD:
+ info->offset = REG(DC_GPIO_HPD_A);
+ switch (en) {
+ case GPIO_HPD_1:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK;
+ break;
+ case GPIO_HPD_2:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK;
+ break;
+ case GPIO_HPD_3:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK;
+ break;
+ case GPIO_HPD_4:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK;
+ break;
+ case GPIO_HPD_5:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK;
+ break;
+ case GPIO_HPD_6:
+ info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (en) {
+ case GPIO_SYNC_HSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_HSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_VSYNC_A:
+ info->offset = REG(DC_GPIO_SYNCA_A);
+ info->mask = DC_GPIO_SYNCA_A__DC_GPIO_VSYNCA_A_MASK;
+ break;
+ case GPIO_SYNC_HSYNC_B:
+ case GPIO_SYNC_VSYNC_B:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_GENLK_CLK_A_MASK;
+ break;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask =
+ DC_GPIO_GENLK_A__DC_GPIO_GENLK_VSYNC_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_A:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_A_A_MASK;
+ break;
+ case GPIO_GSL_SWAPLOCK_B:
+ info->offset = REG(DC_GPIO_GENLK_A);
+ info->mask = DC_GPIO_GENLK_A__DC_GPIO_SWAPLOCK_B_A_MASK;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+ break;
+ case GPIO_ID_VIP_PAD:
+ default:
+ ASSERT_CRITICAL(false);
+ result = false;
+ }
+
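+ /* the switch above resolved the A register of the pin; the matching
+ * MASK, EN and Y registers sit at fixed offsets around it
+ * (MASK = A - 1, EN = A + 1, Y = A + 2) */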
+ if (result) {
+ info->offset_y = info->offset + 2;
+ info->offset_en = info->offset + 1;
+ info->offset_mask = info->offset - 1;
+
+ info->mask_y = info->mask;
+ info->mask_en = info->mask;
+ info->mask_mask = info->mask;
+ }
+
+ return result;
+}
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = offset_to_id,
+ .id_to_offset = id_to_offset,
+};
+
+/*
+ * dal_hw_translate_dcn10_init
+ *
+ * @brief
+ * Initialize Hw translate function pointers.
+ *
+ * @param
+ * struct hw_translate *tr - [out] struct of function pointers
+ *
+ */
+void dal_hw_translate_dcn10_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h
new file mode 100644
index 000000000000..9edef53c80a0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DCN10_H__
+#define __DAL_HW_TRANSLATE_DCN10_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_dcn10_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
new file mode 100644
index 000000000000..9c4a56c738c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_
+
+#include "gpio_regs.h"
+
+/****************************** new register headers */
+/*** following in header */
+
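+/* expands to the <reg, mask, shift> triplet for one field (MASK, A, EN or Y)
+ * of the DC_GPIO_DDC<id> registers, for either the clock or the data line (cd) */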
+#define DDC_GPIO_REG_LIST_ENTRY(type,cd,id) \
+ .type ## _reg = REG(DC_GPIO_DDC ## id ## _ ## type),\
+ .type ## _mask = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_DDC ## id ## _ ## type ## __DC_GPIO_DDC ## id ## cd ## _ ## type ## __SHIFT
+
+#define DDC_GPIO_REG_LIST(cd,id) \
+ {\
+ DDC_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
+ DDC_GPIO_REG_LIST_ENTRY(A,cd,id),\
+ DDC_GPIO_REG_LIST_ENTRY(EN,cd,id),\
+ DDC_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ }
+
+#define DDC_REG_LIST(cd,id) \
+ DDC_GPIO_REG_LIST(cd,id),\
+ .ddc_setup = REG(DC_I2C_DDC ## id ## _SETUP)
+
+#define DDC_GPIO_VGA_REG_LIST_ENTRY(type,cd)\
+ .type ## _reg = REG(DC_GPIO_DDCVGA_ ## type),\
+ .type ## _mask = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_DDCVGA_ ## type ## __DC_GPIO_DDCVGA ## cd ## _ ## type ## __SHIFT
+
+#define DDC_GPIO_VGA_REG_LIST(cd) \
+ {\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(MASK,cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(A,cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(EN,cd),\
+ DDC_GPIO_VGA_REG_LIST_ENTRY(Y,cd)\
+ }
+
+#define DDC_VGA_REG_LIST(cd) \
+ DDC_GPIO_VGA_REG_LIST(cd),\
+ .ddc_setup = mmDC_I2C_DDCVGA_SETUP
+
+#define DDC_GPIO_I2C_REG_LIST_ENTRY(type,cd) \
+ .type ## _reg = REG(DC_GPIO_I2CPAD_ ## type),\
+ .type ## _mask = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_I2CPAD_ ## type ## __DC_GPIO_ ## cd ## _ ## type ## __SHIFT
+
+#define DDC_GPIO_I2C_REG_LIST(cd) \
+ {\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(MASK,cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(A,cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(EN,cd),\
+ DDC_GPIO_I2C_REG_LIST_ENTRY(Y,cd)\
+ }
+
+#define DDC_I2C_REG_LIST(cd) \
+ DDC_GPIO_I2C_REG_LIST(cd),\
+ .ddc_setup = 0
+
+#define DDC_MASK_SH_LIST(mask_sh) \
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_ENABLE, mask_sh),\
+ SF_DDC(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_EDID_DETECT_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1DATA_PD_EN, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, DC_GPIO_DDC1CLK_PD_EN, mask_sh),\
+ SF_DDC(DC_GPIO_DDC1_MASK, AUX_PAD1_MODE, mask_sh),\
+ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SDA_PD_DIS, mask_sh),\
+ SF_DDC(DC_GPIO_I2CPAD_MASK, DC_GPIO_SCL_PD_DIS, mask_sh)
+
+
+struct ddc_registers {
+ struct gpio_registers gpio;
+ uint32_t ddc_setup;
+};
+
+struct ddc_sh_mask {
+ /* i2c_dd_setup */
+ uint32_t DC_I2C_DDC1_ENABLE;
+ uint32_t DC_I2C_DDC1_EDID_DETECT_ENABLE;
+ uint32_t DC_I2C_DDC1_EDID_DETECT_MODE;
+ /* ddc1_mask */
+ uint32_t DC_GPIO_DDC1DATA_PD_EN;
+ uint32_t DC_GPIO_DDC1CLK_PD_EN;
+ uint32_t AUX_PAD1_MODE;
+ /* i2cpad_mask */
+ uint32_t DC_GPIO_SDA_PD_DIS;
+ uint32_t DC_GPIO_SCL_PD_DIS;
+};
+
+
+
+/*** following in dc_resource */
+
+#define ddc_data_regs(id) \
+{\
+ DDC_REG_LIST(DATA,id)\
+}
+
+#define ddc_clk_regs(id) \
+{\
+ DDC_REG_LIST(CLK,id)\
+}
+
+#define ddc_vga_data_regs \
+{\
+ DDC_VGA_REG_LIST(DATA)\
+}
+
+#define ddc_vga_clk_regs \
+{\
+ DDC_VGA_REG_LIST(CLK)\
+}
+
+#define ddc_i2c_data_regs \
+{\
+ DDC_I2C_REG_LIST(SDA)\
+}
+
+#define ddc_i2c_clk_regs \
+{\
+ DDC_I2C_REG_LIST(SCL)\
+}
+
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_DDC_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
new file mode 100644
index 000000000000..26695b963c58
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "../hw_factory.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "../hw_gpio.h"
+#include "../hw_ddc.h"
+#include "../hw_hpd.h"
+
+/* function table */
+static const struct hw_factory_funcs funcs = {
+ .create_ddc_data = NULL,
+ .create_ddc_clock = NULL,
+ .create_generic = NULL,
+ .create_hpd = NULL,
+ .create_sync = NULL,
+ .create_gsl = NULL,
+};
+
+void dal_hw_factory_diag_fpga_init(struct hw_factory *factory)
+{
+ factory->number_of_pins[GPIO_ID_DDC_DATA] = 8;
+ factory->number_of_pins[GPIO_ID_DDC_CLOCK] = 8;
+ factory->number_of_pins[GPIO_ID_GENERIC] = 7;
+ factory->number_of_pins[GPIO_ID_HPD] = 6;
+ factory->number_of_pins[GPIO_ID_GPIO_PAD] = 31;
+ factory->number_of_pins[GPIO_ID_VIP_PAD] = 0;
+ factory->number_of_pins[GPIO_ID_SYNC] = 2;
+ factory->number_of_pins[GPIO_ID_GSL] = 4;
+ factory->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
new file mode 100644
index 000000000000..8a74f6adb8ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_factory_diag.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_DIAG_FPGA_H__
+#define __DAL_HW_FACTORY_DIAG_FPGA_H__
+
+/* Initialize HW factory function pointers and pin info */
+void dal_hw_factory_diag_fpga_init(struct hw_factory *factory);
+
+#endif /* __DAL_HW_FACTORY_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
new file mode 100644
index 000000000000..bf9068846927
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+
+#include "../hw_translate.h"
+
+/* function table */
+static const struct hw_translate_funcs funcs = {
+ .offset_to_id = NULL,
+ .id_to_offset = NULL,
+};
+
+void dal_hw_translate_diag_fpga_init(struct hw_translate *tr)
+{
+ tr->funcs = &funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h
new file mode 100644
index 000000000000..4f053241fe96
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/diagnostics/hw_translate_diag.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_DIAG_FPGA_H__
+#define __DAL_HW_TRANSLATE_DIAG_FPGA_H__
+
+struct hw_translate;
+
+/* Initialize Hw translate function pointers */
+void dal_hw_translate_diag_fpga_init(struct hw_translate *tr);
+
+#endif /* __DAL_HW_TRANSLATE_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
new file mode 100644
index 000000000000..1d1efd72b291
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+
+#include "include/gpio_interface.h"
+#include "include/gpio_service_interface.h"
+#include "hw_gpio.h"
+#include "hw_translate.h"
+#include "hw_factory.h"
+#include "gpio_service.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Public API
+ */
+
+enum gpio_result dal_gpio_open(
+ struct gpio *gpio,
+ enum gpio_mode mode)
+{
+ return dal_gpio_open_ex(gpio, mode);
+}
+
+enum gpio_result dal_gpio_open_ex(
+ struct gpio *gpio,
+ enum gpio_mode mode)
+{
+ if (gpio->pin) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_ALREADY_OPENED;
+ }
+
+ gpio->mode = mode;
+
+ return dal_gpio_service_open(
+ gpio->service, gpio->id, gpio->en, mode, &gpio->pin);
+}
+
+enum gpio_result dal_gpio_get_value(
+ const struct gpio *gpio,
+ uint32_t *value)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->get_value(gpio->pin, value);
+}
+
+enum gpio_result dal_gpio_set_value(
+ const struct gpio *gpio,
+ uint32_t value)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->set_value(gpio->pin, value);
+}
+
+enum gpio_mode dal_gpio_get_mode(
+ const struct gpio *gpio)
+{
+ return gpio->mode;
+}
+
+enum gpio_result dal_gpio_change_mode(
+ struct gpio *gpio,
+ enum gpio_mode mode)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->change_mode(gpio->pin, mode);
+}
+
+enum gpio_id dal_gpio_get_id(
+ const struct gpio *gpio)
+{
+ return gpio->id;
+}
+
+uint32_t dal_gpio_get_enum(
+ const struct gpio *gpio)
+{
+ return gpio->en;
+}
+
+enum gpio_result dal_gpio_set_config(
+ struct gpio *gpio,
+ const struct gpio_config_data *config_data)
+{
+ if (!gpio->pin) {
+ BREAK_TO_DEBUGGER();
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
+ return gpio->pin->funcs->set_config(gpio->pin, config_data);
+}
+
+enum gpio_result dal_gpio_get_pin_info(
+ const struct gpio *gpio,
+ struct gpio_pin_info *pin_info)
+{
+ return gpio->service->translate.funcs->id_to_offset(
+ gpio->id, gpio->en, pin_info) ?
+ GPIO_RESULT_OK : GPIO_RESULT_INVALID_DATA;
+}
+
+enum sync_source dal_gpio_get_sync_source(
+ const struct gpio *gpio)
+{
+ switch (gpio->id) {
+ case GPIO_ID_GENERIC:
+ switch (gpio->en) {
+ case GPIO_GENERIC_A:
+ return SYNC_SOURCE_IO_GENERIC_A;
+ case GPIO_GENERIC_B:
+ return SYNC_SOURCE_IO_GENERIC_B;
+ case GPIO_GENERIC_C:
+ return SYNC_SOURCE_IO_GENERIC_C;
+ case GPIO_GENERIC_D:
+ return SYNC_SOURCE_IO_GENERIC_D;
+ case GPIO_GENERIC_E:
+ return SYNC_SOURCE_IO_GENERIC_E;
+ case GPIO_GENERIC_F:
+ return SYNC_SOURCE_IO_GENERIC_F;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ case GPIO_ID_SYNC:
+ switch (gpio->en) {
+ case GPIO_SYNC_HSYNC_A:
+ return SYNC_SOURCE_IO_HSYNC_A;
+ case GPIO_SYNC_VSYNC_A:
+ return SYNC_SOURCE_IO_VSYNC_A;
+ case GPIO_SYNC_HSYNC_B:
+ return SYNC_SOURCE_IO_HSYNC_B;
+ case GPIO_SYNC_VSYNC_B:
+ return SYNC_SOURCE_IO_VSYNC_B;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ case GPIO_ID_HPD:
+ switch (gpio->en) {
+ case GPIO_HPD_1:
+ return SYNC_SOURCE_IO_HPD1;
+ case GPIO_HPD_2:
+ return SYNC_SOURCE_IO_HPD2;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ case GPIO_ID_GSL:
+ switch (gpio->en) {
+ case GPIO_GSL_GENLOCK_CLOCK:
+ return SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK;
+ case GPIO_GSL_GENLOCK_VSYNC:
+ return SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC;
+ case GPIO_GSL_SWAPLOCK_A:
+ return SYNC_SOURCE_GSL_IO_SWAPLOCK_A;
+ case GPIO_GSL_SWAPLOCK_B:
+ return SYNC_SOURCE_GSL_IO_SWAPLOCK_B;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+ break;
+ default:
+ return SYNC_SOURCE_NONE;
+ }
+}
+
+enum gpio_pin_output_state dal_gpio_get_output_state(
+ const struct gpio *gpio)
+{
+ return gpio->output_state;
+}
+
+void dal_gpio_close(
+ struct gpio *gpio)
+{
+ if (!gpio)
+ return;
+
+ dal_gpio_service_close(gpio->service, &gpio->pin);
+
+ gpio->mode = GPIO_MODE_UNKNOWN;
+}
+
+/*
+ * @brief
+ * Creation and destruction
+ */
+
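+/*
+ * Typical pin lifetime (illustrative sketch only, not taken from a caller
+ * in this patch):
+ *
+ *	struct gpio *pin = dal_gpio_create(service, GPIO_ID_HPD, GPIO_HPD_1,
+ *			GPIO_PIN_OUTPUT_STATE_DEFAULT);
+ *	uint32_t level;
+ *
+ *	if (!pin)
+ *		return;
+ *
+ *	if (dal_gpio_open(pin, GPIO_MODE_INPUT) == GPIO_RESULT_OK) {
+ *		dal_gpio_get_value(pin, &level);
+ *		dal_gpio_close(pin);
+ *	}
+ *	dal_gpio_destroy(&pin);
+ */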
+struct gpio *dal_gpio_create(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_pin_output_state output_state)
+{
+ struct gpio *gpio = kzalloc(sizeof(struct gpio), GFP_KERNEL);
+
+ if (!gpio) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ gpio->service = service;
+ gpio->pin = NULL;
+ gpio->id = id;
+ gpio->en = en;
+ gpio->mode = GPIO_MODE_UNKNOWN;
+ gpio->output_state = output_state;
+
+ return gpio;
+}
+
+void dal_gpio_destroy(
+ struct gpio **gpio)
+{
+ if (!gpio || !*gpio) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ dal_gpio_close(*gpio);
+
+ kfree(*gpio);
+
+ *gpio = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h
new file mode 100644
index 000000000000..5c5925299f8d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_
+
+struct gpio_registers {
+ uint32_t MASK_reg;
+ uint32_t MASK_mask;
+ uint32_t MASK_shift;
+ uint32_t A_reg;
+ uint32_t A_mask;
+ uint32_t A_shift;
+ uint32_t EN_reg;
+ uint32_t EN_mask;
+ uint32_t EN_shift;
+ uint32_t Y_reg;
+ uint32_t Y_mask;
+ uint32_t Y_shift;
+};
+
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_GPIO_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
new file mode 100644
index 000000000000..80038e0e610f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "dm_services.h"
+#include "include/gpio_interface.h"
+#include "include/gpio_service_interface.h"
+#include "hw_translate.h"
+#include "hw_factory.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "gpio_service.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "hw_gpio.h"
+
+/*
+ * @brief
+ * Public API.
+ */
+
+struct gpio_service *dal_gpio_service_create(
+ enum dce_version dce_version_major,
+ enum dce_version dce_version_minor,
+ struct dc_context *ctx)
+{
+ struct gpio_service *service;
+
+ uint32_t index_of_id;
+
+ service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
+
+ if (!service) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (!dal_hw_translate_init(&service->translate, dce_version_major,
+ dce_version_minor)) {
+ BREAK_TO_DEBUGGER();
+ goto failure_1;
+ }
+
+ if (!dal_hw_factory_init(&service->factory, dce_version_major,
+ dce_version_minor)) {
+ BREAK_TO_DEBUGGER();
+ goto failure_1;
+ }
+
+ /* allocate and initialize busyness storage */
+ {
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ index_of_id = 0;
+ service->ctx = ctx;
+
+ do {
+ uint32_t number_of_bits =
+ service->factory.number_of_pins[index_of_id];
+
+ uint32_t number_of_uints =
+ (number_of_bits + bits_per_uint - 1) /
+ bits_per_uint;
+
+ uint32_t *slot;
+
+ if (number_of_bits) {
+ uint32_t index_of_uint = 0;
+
+ slot = kzalloc(number_of_uints * sizeof(uint32_t),
+ GFP_KERNEL);
+
+ if (!slot) {
+ BREAK_TO_DEBUGGER();
+ goto failure_2;
+ }
+
+ do {
+ slot[index_of_uint] = 0;
+
+ ++index_of_uint;
+ } while (index_of_uint < number_of_uints);
+ } else
+ slot = NULL;
+
+ service->busyness[index_of_id] = slot;
+
+ ++index_of_id;
+ } while (index_of_id < GPIO_ID_COUNT);
+ }
+
+ return service;
+
+failure_2:
+ while (index_of_id) {
+ uint32_t *slot;
+
+ --index_of_id;
+
+ slot = service->busyness[index_of_id];
+
+ kfree(slot);
+ }
+
+failure_1:
+ kfree(service);
+
+ return NULL;
+}
+
+struct gpio *dal_gpio_service_create_irq(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask)
+{
+ enum gpio_id id;
+ uint32_t en;
+
+ if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ return dal_gpio_create_irq(service, id, en);
+}
+
+void dal_gpio_service_destroy(
+ struct gpio_service **ptr)
+{
+ if (!ptr || !*ptr) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* free busyness storage */
+ {
+ uint32_t index_of_id = 0;
+
+ do {
+ uint32_t *slot = (*ptr)->busyness[index_of_id];
+
+ kfree(slot);
+
+ ++index_of_id;
+ } while (index_of_id < GPIO_ID_COUNT);
+ }
+
+ kfree(*ptr);
+
+ *ptr = NULL;
+}
+
+/*
+ * @brief
+ * Private API.
+ */
+
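+/* busyness[id] is a bitmap with one bit per 'en' value, packed into
+ * uint32_t slots; the helpers below query and update a single bit */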
+static bool is_pin_busy(
+ const struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
+
+ return 0 != (*slot & (1 << (en % bits_per_uint)));
+}
+
+static void set_pin_busy(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ service->busyness[id][en / bits_per_uint] |=
+ (1 << (en % bits_per_uint));
+}
+
+static void set_pin_free(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
+
+ service->busyness[id][en / bits_per_uint] &=
+ ~(1 << (en % bits_per_uint));
+}
+
+enum gpio_result dal_gpio_service_open(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_mode mode,
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_gpio_pin *pin;
+
+ if (!service->busyness[id]) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_OPEN_FAILED;
+ }
+
+ if (is_pin_busy(service, id, en)) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_DEVICE_BUSY;
+ }
+
+ switch (id) {
+ case GPIO_ID_DDC_DATA:
+ pin = service->factory.funcs->create_ddc_data(
+ service->ctx, id, en);
+ service->factory.funcs->define_ddc_registers(pin, en);
+ break;
+ case GPIO_ID_DDC_CLOCK:
+ pin = service->factory.funcs->create_ddc_clock(
+ service->ctx, id, en);
+ service->factory.funcs->define_ddc_registers(pin, en);
+ break;
+ case GPIO_ID_GENERIC:
+ pin = service->factory.funcs->create_generic(
+ service->ctx, id, en);
+ break;
+ case GPIO_ID_HPD:
+ pin = service->factory.funcs->create_hpd(
+ service->ctx, id, en);
+ service->factory.funcs->define_hpd_registers(pin, en);
+ break;
+ case GPIO_ID_SYNC:
+ pin = service->factory.funcs->create_sync(
+ service->ctx, id, en);
+ break;
+ case GPIO_ID_GSL:
+ pin = service->factory.funcs->create_gsl(
+ service->ctx, id, en);
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+
+ if (!pin) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+
+ if (!pin->funcs->open(pin, mode)) {
+ ASSERT_CRITICAL(false);
+ dal_gpio_service_close(service, &pin);
+ return GPIO_RESULT_OPEN_FAILED;
+ }
+
+ set_pin_busy(service, id, en);
+ *ptr = pin;
+ return GPIO_RESULT_OK;
+}
+
+void dal_gpio_service_close(
+ struct gpio_service *service,
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_gpio_pin *pin;
+
+ if (!ptr) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ pin = *ptr;
+
+ if (pin) {
+ set_pin_free(service, pin->id, pin->en);
+
+ pin->funcs->close(pin);
+
+ pin->funcs->destroy(ptr);
+ }
+}
+
+
+enum dc_irq_source dal_irq_get_source(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1 +
+ dal_gpio_get_enum(irq));
+ case GPIO_ID_GPIO_PAD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_GPIOPAD0 +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+enum dc_irq_source dal_irq_get_rx_source(
+ const struct gpio *irq)
+{
+ enum gpio_id id = dal_gpio_get_id(irq);
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ return (enum dc_irq_source)(DC_IRQ_SOURCE_HPD1RX +
+ dal_gpio_get_enum(irq));
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+enum gpio_result dal_irq_setup_hpd_filter(
+ struct gpio *irq,
+ struct gpio_hpd_config *config)
+{
+ struct gpio_config_data config_data;
+
+ if (!config)
+ return GPIO_RESULT_INVALID_DATA;
+
+ config_data.type = GPIO_CONFIG_TYPE_HPD;
+ config_data.config.hpd = *config;
+
+ return dal_gpio_set_config(irq, &config_data);
+}
+
+/*
+ * @brief
+ * Creation and destruction
+ */
+
+struct gpio *dal_gpio_create_irq(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct gpio *irq;
+
+ switch (id) {
+ case GPIO_ID_HPD:
+ case GPIO_ID_GPIO_PAD:
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ irq = dal_gpio_create(
+ service, id, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ if (irq)
+ return irq;
+
+ ASSERT_CRITICAL(false);
+ return NULL;
+}
+
+void dal_gpio_destroy_irq(
+ struct gpio **irq)
+{
+ if (!irq || !*irq) {
+ ASSERT_CRITICAL(false);
+ return;
+ }
+
+ dal_gpio_close(*irq);
+ dal_gpio_destroy(irq);
+ kfree(*irq);
+
+ *irq = NULL;
+}
+
+struct ddc *dal_gpio_create_ddc(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask,
+ struct gpio_ddc_hw_info *info)
+{
+ enum gpio_id id;
+ uint32_t en;
+ struct ddc *ddc;
+
+ if (!service->translate.funcs->offset_to_id(offset, mask, &id, &en))
+ return NULL;
+
+ ddc = kzalloc(sizeof(struct ddc), GFP_KERNEL);
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ ddc->pin_data = dal_gpio_create(
+ service, GPIO_ID_DDC_DATA, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ if (!ddc->pin_data) {
+ BREAK_TO_DEBUGGER();
+ goto failure_1;
+ }
+
+ ddc->pin_clock = dal_gpio_create(
+ service, GPIO_ID_DDC_CLOCK, en, GPIO_PIN_OUTPUT_STATE_DEFAULT);
+
+ if (!ddc->pin_clock) {
+ BREAK_TO_DEBUGGER();
+ goto failure_2;
+ }
+
+ ddc->hw_info = *info;
+
+ ddc->ctx = service->ctx;
+
+ return ddc;
+
+failure_2:
+ dal_gpio_destroy(&ddc->pin_data);
+
+failure_1:
+ kfree(ddc);
+
+ return NULL;
+}
+
+void dal_gpio_destroy_ddc(
+ struct ddc **ddc)
+{
+ if (!ddc || !*ddc) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ dal_ddc_close(*ddc);
+ dal_gpio_destroy(&(*ddc)->pin_data);
+ dal_gpio_destroy(&(*ddc)->pin_clock);
+ kfree(*ddc);
+
+ *ddc = NULL;
+}
+
+enum gpio_result dal_ddc_open(
+ struct ddc *ddc,
+ enum gpio_mode mode,
+ enum gpio_ddc_config_type config_type)
+{
+ enum gpio_result result;
+
+ struct gpio_config_data config_data;
+ struct hw_gpio *hw_data;
+ struct hw_gpio *hw_clock;
+
+ result = dal_gpio_open_ex(ddc->pin_data, mode);
+
+ if (result != GPIO_RESULT_OK) {
+ BREAK_TO_DEBUGGER();
+ return result;
+ }
+
+ result = dal_gpio_open_ex(ddc->pin_clock, mode);
+
+ if (result != GPIO_RESULT_OK) {
+ BREAK_TO_DEBUGGER();
+ goto failure;
+ }
+
+ /* the DDC clock and data pins belong to the same DDC block;
+ * the data pin is used to set the pad mode */
+
+ if (mode == GPIO_MODE_INPUT)
+ /* this path is used by detect_sink_type,
+ * which needs an extra delay */
+ config_data.type = GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE;
+ else
+ config_data.type = GPIO_CONFIG_TYPE_DDC;
+
+ config_data.config.ddc.type = config_type;
+
+ hw_data = FROM_HW_GPIO_PIN(ddc->pin_data->pin);
+ hw_clock = FROM_HW_GPIO_PIN(ddc->pin_clock->pin);
+
+ config_data.config.ddc.data_en_bit_present = hw_data->store.en != 0;
+ config_data.config.ddc.clock_en_bit_present = hw_clock->store.en != 0;
+
+ result = dal_gpio_set_config(ddc->pin_data, &config_data);
+
+ if (result == GPIO_RESULT_OK)
+ return result;
+
+ BREAK_TO_DEBUGGER();
+
+ dal_gpio_close(ddc->pin_clock);
+
+failure:
+ dal_gpio_close(ddc->pin_data);
+
+ return result;
+}
+
+enum gpio_result dal_ddc_change_mode(
+ struct ddc *ddc,
+ enum gpio_mode mode)
+{
+ enum gpio_result result;
+
+ enum gpio_mode original_mode =
+ dal_gpio_get_mode(ddc->pin_data);
+
+ result = dal_gpio_change_mode(ddc->pin_data, mode);
+
+ /* [anaumov] DAL2 code returned GPIO_RESULT_NON_SPECIFIC_ERROR
+ * in case of failures;
+ * here, on failure, the original mode must be
+ * restored explicitly */
+
+ if (result != GPIO_RESULT_OK)
+ goto failure;
+
+ result = dal_gpio_change_mode(ddc->pin_clock, mode);
+
+ if (result == GPIO_RESULT_OK)
+ return result;
+
+ dal_gpio_change_mode(ddc->pin_clock, original_mode);
+
+failure:
+ dal_gpio_change_mode(ddc->pin_data, original_mode);
+
+ return result;
+}
+
+enum gpio_ddc_line dal_ddc_get_line(
+ const struct ddc *ddc)
+{
+ return (enum gpio_ddc_line)dal_gpio_get_enum(ddc->pin_data);
+}
+
+enum gpio_result dal_ddc_set_config(
+ struct ddc *ddc,
+ enum gpio_ddc_config_type config_type)
+{
+ struct gpio_config_data config_data;
+
+ config_data.type = GPIO_CONFIG_TYPE_DDC;
+
+ config_data.config.ddc.type = config_type;
+ config_data.config.ddc.data_en_bit_present = false;
+ config_data.config.ddc.clock_en_bit_present = false;
+
+ return dal_gpio_set_config(ddc->pin_data, &config_data);
+}
+
+void dal_ddc_close(
+ struct ddc *ddc)
+{
+ dal_gpio_close(ddc->pin_clock);
+ dal_gpio_close(ddc->pin_data);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
new file mode 100644
index 000000000000..c7f3081f59cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_SERVICE_H__
+#define __DAL_GPIO_SERVICE_H__
+
+struct hw_translate;
+struct hw_factory;
+
+struct gpio_service {
+ struct dc_context *ctx;
+ struct hw_translate translate;
+ struct hw_factory factory;
+ /*
+ * @brief
+ * Busyness storage.
+ * For each member of 'enum gpio_id',
+ * store an array of bits (packed into uint32_t slots),
+ * indexed by the 'en' value */
+ uint32_t *busyness[GPIO_ID_COUNT];
+};
+
+enum gpio_result dal_gpio_service_open(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_mode mode,
+ struct hw_gpio_pin **ptr);
+
+void dal_gpio_service_close(
+ struct gpio_service *service,
+ struct hw_gpio_pin **ptr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
new file mode 100644
index 000000000000..dcfdd71b2304
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_
+
+#include "gpio_regs.h"
+
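+/* the DC_GPIO_HPD_* register fields are named 1-based (HPD1..HPD6), while
+ * HPD_REG_LIST() takes a 0-based id; ONE_MORE_<n> maps between the two */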
+#define ONE_MORE_0 1
+#define ONE_MORE_1 2
+#define ONE_MORE_2 3
+#define ONE_MORE_3 4
+#define ONE_MORE_4 5
+#define ONE_MORE_5 6
+
+
+#define HPD_GPIO_REG_LIST_ENTRY(type,cd,id) \
+ .type ## _reg = REG(DC_GPIO_HPD_## type),\
+ .type ## _mask = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## _MASK,\
+ .type ## _shift = DC_GPIO_HPD_ ## type ## __DC_GPIO_HPD ## id ## _ ## type ## __SHIFT
+
+#define HPD_GPIO_REG_LIST(id) \
+ {\
+ HPD_GPIO_REG_LIST_ENTRY(MASK,cd,id),\
+ HPD_GPIO_REG_LIST_ENTRY(A,cd,id),\
+ HPD_GPIO_REG_LIST_ENTRY(EN,cd,id),\
+ HPD_GPIO_REG_LIST_ENTRY(Y,cd,id)\
+ }
+
+#define HPD_REG_LIST(id) \
+ HPD_GPIO_REG_LIST(ONE_MORE_ ## id), \
+ .int_status = REGI(DC_HPD_INT_STATUS, HPD, id),\
+ .toggle_filt_cntl = REGI(DC_HPD_TOGGLE_FILT_CNTL, HPD, id)
+
+#define HPD_MASK_SH_LIST(mask_sh) \
+ SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE_DELAYED, mask_sh),\
+ SF_HPD(DC_HPD_INT_STATUS, DC_HPD_SENSE, mask_sh),\
+ SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_CONNECT_INT_DELAY, mask_sh),\
+ SF_HPD(DC_HPD_TOGGLE_FILT_CNTL, DC_HPD_DISCONNECT_INT_DELAY, mask_sh)
+
+struct hpd_registers {
+ struct gpio_registers gpio;
+ uint32_t int_status;
+ uint32_t toggle_filt_cntl;
+};
+
+struct hpd_sh_mask {
+ /* int_status */
+ uint32_t DC_HPD_SENSE_DELAYED;
+ uint32_t DC_HPD_SENSE;
+ /* toggle_filt_cntl */
+ uint32_t DC_HPD_CONNECT_INT_DELAY;
+ uint32_t DC_HPD_DISCONNECT_INT_DELAY;
+};
+
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_GPIO_HPD_REGS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
new file mode 100644
index 000000000000..310f48965b27
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+#include "hw_ddc.h"
+
+#include "reg_helper.h"
+#include "gpio_regs.h"
+
+
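+/* helpers consumed by the REG_GET/REG_SET/REG_UPDATE macros from reg_helper.h */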
+#undef FN
+#define FN(reg_name, field_name) \
+ ddc->shifts->field_name, ddc->masks->field_name
+
+#define CTX \
+ ddc->base.base.ctx
+#define REG(reg)\
+ (ddc->regs->reg)
+
+static void destruct(
+ struct hw_ddc *pin)
+{
+ dal_hw_gpio_destruct(&pin->base);
+}
+
+static void destroy(
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_ddc *pin = HW_DDC_FROM_BASE(*ptr);
+
+ destruct(pin);
+
+ kfree(pin);
+
+ *ptr = NULL;
+}
+
+static enum gpio_result set_config(
+ struct hw_gpio_pin *ptr,
+ const struct gpio_config_data *config_data)
+{
+ struct hw_ddc *ddc = HW_DDC_FROM_BASE(ptr);
+ struct hw_gpio *hw_gpio = NULL;
+ uint32_t regval;
+ uint32_t ddc_data_pd_en = 0;
+ uint32_t ddc_clk_pd_en = 0;
+ uint32_t aux_pad_mode = 0;
+
+ hw_gpio = &ddc->base;
+
+ if (hw_gpio == NULL) {
+ ASSERT_CRITICAL(false);
+ return GPIO_RESULT_NULL_HANDLE;
+ }
+
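+ /* snapshot the current pull-down enables and the AUX pad mode so that
+ * only the fields that actually need to change are written back */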
+ regval = REG_GET_3(gpio.MASK_reg,
+ DC_GPIO_DDC1DATA_PD_EN, &ddc_data_pd_en,
+ DC_GPIO_DDC1CLK_PD_EN, &ddc_clk_pd_en,
+ AUX_PAD1_MODE, &aux_pad_mode);
+
+ switch (config_data->config.ddc.type) {
+ case GPIO_DDC_CONFIG_TYPE_MODE_I2C:
+ /* On plug-in, there is a transient level on the pad
+ * which must be discharged through the internal pull-down.
+ * Enable the internal pull-down; a 2.5 ms discharge time
+ * is required for detection of AUX mode */
+ if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) {
+ if (!ddc_data_pd_en || !ddc_clk_pd_en) {
+
+ REG_SET_2(gpio.MASK_reg, regval,
+ DC_GPIO_DDC1DATA_PD_EN, 1,
+ DC_GPIO_DDC1CLK_PD_EN, 1);
+
+ if (config_data->type ==
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
+ msleep(3);
+ }
+ } else {
+ uint32_t reg2;
+ uint32_t sda_pd_dis = 0;
+ uint32_t scl_pd_dis = 0;
+
+ reg2 = REG_GET_2(gpio.MASK_reg,
+ DC_GPIO_SDA_PD_DIS, &sda_pd_dis,
+ DC_GPIO_SCL_PD_DIS, &scl_pd_dis);
+
+ if (sda_pd_dis) {
+ REG_SET(gpio.MASK_reg, regval,
+ DC_GPIO_SDA_PD_DIS, 0);
+
+ if (config_data->type ==
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
+ msleep(3);
+ }
+
+ if (!scl_pd_dis) {
+ REG_SET(gpio.MASK_reg, regval,
+ DC_GPIO_SCL_PD_DIS, 1);
+
+ if (config_data->type ==
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE)
+ msleep(3);
+ }
+ }
+
+ if (aux_pad_mode) {
+ /* let the pins get de-asserted
+ * before setting the pad to I2C mode */
+ if (config_data->config.ddc.data_en_bit_present ||
+ config_data->config.ddc.clock_en_bit_present)
+ /* [anaumov] in DAL2, there was
+ * dc_service_delay_in_microseconds(2000); */
+ msleep(2);
+
+ /* set the I2C pad mode */
+ /* read the register again,
+ * some bits may have been changed */
+ REG_UPDATE(gpio.MASK_reg,
+ AUX_PAD1_MODE, 0);
+ }
+
+ return GPIO_RESULT_OK;
+ case GPIO_DDC_CONFIG_TYPE_MODE_AUX:
+ /* set the AUX pad mode */
+ if (!aux_pad_mode) {
+ REG_SET(gpio.MASK_reg, regval,
+ AUX_PAD1_MODE, 1);
+ }
+
+ return GPIO_RESULT_OK;
+ case GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT:
+ if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
+ (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ REG_UPDATE_3(ddc_setup,
+ DC_I2C_DDC1_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_MODE, 0);
+ return GPIO_RESULT_OK;
+ }
+ break;
+ case GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT:
+ if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
+ (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ REG_UPDATE_3(ddc_setup,
+ DC_I2C_DDC1_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_ENABLE, 1,
+ DC_I2C_DDC1_EDID_DETECT_MODE, 1);
+ return GPIO_RESULT_OK;
+ }
+ break;
+ case GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING:
+ if ((hw_gpio->base.en >= GPIO_DDC_LINE_DDC1) &&
+ (hw_gpio->base.en <= GPIO_DDC_LINE_DDC_VGA)) {
+ REG_UPDATE_2(ddc_setup,
+ DC_I2C_DDC1_ENABLE, 0,
+ DC_I2C_DDC1_EDID_DETECT_ENABLE, 0);
+ return GPIO_RESULT_OK;
+ }
+ break;
+ }
+
+ BREAK_TO_DEBUGGER();
+
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+}
+
+static const struct hw_gpio_pin_funcs funcs = {
+ .destroy = destroy,
+ .open = dal_hw_gpio_open,
+ .get_value = dal_hw_gpio_get_value,
+ .set_value = dal_hw_gpio_set_value,
+ .set_config = set_config,
+ .change_mode = dal_hw_gpio_change_mode,
+ .close = dal_hw_gpio_close,
+};
+
+static void construct(
+ struct hw_ddc *ddc,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_gpio_construct(&ddc->base, id, en, ctx);
+ ddc->base.base.funcs = &funcs;
+}
+
+struct hw_gpio_pin *dal_hw_ddc_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct hw_ddc *pin;
+
+ if ((en < GPIO_DDC_LINE_MIN) || (en > GPIO_DDC_LINE_MAX)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ pin = kzalloc(sizeof(struct hw_ddc), GFP_KERNEL);
+ if (!pin) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(pin, id, en, ctx);
+ return &pin->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
new file mode 100644
index 000000000000..9690e2a885d7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_DDC_H__
+#define __DAL_HW_DDC_H__
+
+#include "ddc_regs.h"
+
+struct hw_ddc {
+ struct hw_gpio base;
+ const struct ddc_registers *regs;
+ const struct ddc_sh_mask *shifts;
+ const struct ddc_sh_mask *masks;
+};
+
+#define HW_DDC_FROM_BASE(hw_gpio) \
+ container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_ddc, base)
+
+struct hw_gpio_pin *dal_hw_ddc_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
new file mode 100644
index 000000000000..87b580fa4bc9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by the header of this unit
+ */
+#include "include/gpio_types.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "hw_factory.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce80/hw_factory_dce80.h"
+#include "dce110/hw_factory_dce110.h"
+#include "dce120/hw_factory_dce120.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/hw_factory_dcn10.h"
+#endif
+
+#include "diagnostics/hw_factory_diag.h"
+
+/*
+ * This unit
+ */
+
+bool dal_hw_factory_init(
+ struct hw_factory *factory,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment)
+{
+ if (IS_FPGA_MAXIMUS_DC(dce_environment)) {
+ dal_hw_factory_diag_fpga_init(factory);
+ return true;
+ }
+
+ switch (dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ dal_hw_factory_dce80_init(factory);
+ return true;
+
+ case DCE_VERSION_10_0:
+ dal_hw_factory_dce110_init(factory);
+ return true;
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
+ dal_hw_factory_dce110_init(factory);
+ return true;
+ case DCE_VERSION_12_0:
+ dal_hw_factory_dce120_init(factory);
+ return true;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ dal_hw_factory_dcn10_init(factory);
+ return true;
+#endif
+
+ default:
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+}
+
+void dal_hw_factory_destroy(
+ struct dc_context *ctx,
+ struct hw_factory **factory)
+{
+ if (!factory || !*factory) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ kfree(*factory);
+
+ *factory = NULL;
+}
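The factory is the only place that knows which per-ASIC constructors to use; callers initialise it once for the detected DCE/DCN version and then create pins through its funcs table. A minimal sketch of creating an HPD pin and letting the per-ASIC hook bind its registers (illustrative only; the local names are assumptions):

static struct hw_gpio_pin *example_make_hpd_pin(struct dc_context *ctx,
	enum dce_version ver, enum dce_environment env, uint32_t en)
{
	struct hw_factory factory = { 0 };
	struct hw_gpio_pin *pin;

	if (!dal_hw_factory_init(&factory, ver, env))
		return NULL;	/* unsupported DCE/DCN version */

	pin = factory.funcs->create_hpd(ctx, GPIO_ID_HPD, en);
	if (pin)
		/* per-ASIC hook fills in the pin's register addresses */
		factory.funcs->define_hpd_registers(pin, en);

	return pin;
}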
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
new file mode 100644
index 000000000000..6e4dd3521935
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_FACTORY_H__
+#define __DAL_HW_FACTORY_H__
+
+struct hw_gpio_pin;
+struct hw_hpd;
+
+struct hw_factory {
+ uint32_t number_of_pins[GPIO_ID_COUNT];
+
+ const struct hw_factory_funcs {
+ struct hw_gpio_pin *(*create_ddc_data)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_ddc_clock)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_generic)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_hpd)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_sync)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_gsl)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ void (*define_hpd_registers)(
+ struct hw_gpio_pin *pin,
+ uint32_t en);
+ void (*define_ddc_registers)(
+ struct hw_gpio_pin *pin,
+ uint32_t en);
+ } *funcs;
+};
+
+bool dal_hw_factory_init(
+ struct hw_factory *factory,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
new file mode 100644
index 000000000000..660510842ecf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+
+#include "reg_helper.h"
+#include "gpio_regs.h"
+
+#undef FN
+#define FN(reg_name, field_name) \
+ gpio->regs->field_name ## _shift, gpio->regs->field_name ## _mask
+
+#define CTX \
+ gpio->base.ctx
+#define REG(reg)\
+ (gpio->regs->reg)
+
+static void store_registers(
+ struct hw_gpio *gpio)
+{
+ REG_GET(MASK_reg, MASK, &gpio->store.mask);
+ REG_GET(A_reg, A, &gpio->store.a);
+ REG_GET(EN_reg, EN, &gpio->store.en);
+ /* TODO store GPIO_MUX_CONTROL if we ever use it */
+}
+
+static void restore_registers(
+ struct hw_gpio *gpio)
+{
+ REG_UPDATE(MASK_reg, MASK, gpio->store.mask);
+ REG_UPDATE(A_reg, A, gpio->store.a);
+ REG_UPDATE(EN_reg, EN, gpio->store.en);
+ /* TODO restore GPIO_MUX_CONTROL if we ever use it */
+}
+
+bool dal_hw_gpio_open(
+ struct hw_gpio_pin *ptr,
+ enum gpio_mode mode)
+{
+ struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
+
+ store_registers(pin);
+
+ ptr->opened = (dal_hw_gpio_config_mode(pin, mode) == GPIO_RESULT_OK);
+
+ return ptr->opened;
+}
+
+enum gpio_result dal_hw_gpio_get_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t *value)
+{
+ const struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
+
+ enum gpio_result result = GPIO_RESULT_OK;
+
+ switch (ptr->mode) {
+ case GPIO_MODE_INPUT:
+ case GPIO_MODE_OUTPUT:
+ case GPIO_MODE_HARDWARE:
+ case GPIO_MODE_FAST_OUTPUT:
+ REG_GET(Y_reg, Y, value);
+ break;
+ default:
+ result = GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+
+ return result;
+}
+
+enum gpio_result dal_hw_gpio_set_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t value)
+{
+ struct hw_gpio *gpio = FROM_HW_GPIO_PIN(ptr);
+
+ /* This is the public interface:
+ * the input comes from the client and is not shifted yet
+ * (because the client does not know the shifts). */
+
+ switch (ptr->mode) {
+ case GPIO_MODE_OUTPUT:
+ REG_UPDATE(A_reg, A, value);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_FAST_OUTPUT:
+ /* We use (EN) for faster switching (used in DDC GPIO).
+ * (A) is grounded; the output is driven by (EN = 0)
+ * to pull the line down (output == 0), and with (EN = 1)
+ * the output is tri-state */
+ REG_UPDATE(EN_reg, EN, ~value);
+ return GPIO_RESULT_OK;
+ default:
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+}
+
+enum gpio_result dal_hw_gpio_change_mode(
+ struct hw_gpio_pin *ptr,
+ enum gpio_mode mode)
+{
+ struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
+
+ return dal_hw_gpio_config_mode(pin, mode);
+}
+
+void dal_hw_gpio_close(
+ struct hw_gpio_pin *ptr)
+{
+ struct hw_gpio *pin = FROM_HW_GPIO_PIN(ptr);
+
+ restore_registers(pin);
+
+ ptr->mode = GPIO_MODE_UNKNOWN;
+ ptr->opened = false;
+}
+
+enum gpio_result dal_hw_gpio_config_mode(
+ struct hw_gpio *gpio,
+ enum gpio_mode mode)
+{
+ gpio->base.mode = mode;
+
+ switch (mode) {
+ case GPIO_MODE_INPUT:
+ /* turn off output enable, act as input pin;
+ * program the pin as GPIO, mask out signal driven by HW */
+ REG_UPDATE(EN_reg, EN, 0);
+ REG_UPDATE(MASK_reg, MASK, 1);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_OUTPUT:
+ /* turn on output enable, act as output pin;
+ * program the pin as GPIO, mask out signal driven by HW */
+ REG_UPDATE(A_reg, A, 0);
+ REG_UPDATE(MASK_reg, MASK, 1);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_FAST_OUTPUT:
+ /* grounding the A register and then using the EN register bit
+ * has a faster effect on the rise time */
+ REG_UPDATE(A_reg, A, 0);
+ REG_UPDATE(MASK_reg, MASK, 1);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_HARDWARE:
+ /* program the pin as tri-state, pin is driven by HW */
+ REG_UPDATE(MASK_reg, MASK, 0);
+ return GPIO_RESULT_OK;
+ case GPIO_MODE_INTERRUPT:
+ /* Interrupt mode supported only by HPD (IrqGpio) pins. */
+ REG_UPDATE(MASK_reg, MASK, 0);
+ return GPIO_RESULT_OK;
+ default:
+ return GPIO_RESULT_NON_SPECIFIC_ERROR;
+ }
+}
+
+void dal_hw_gpio_construct(
+ struct hw_gpio *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ pin->base.ctx = ctx;
+ pin->base.id = id;
+ pin->base.en = en;
+ pin->base.mode = GPIO_MODE_UNKNOWN;
+ pin->base.opened = false;
+
+ pin->store.mask = 0;
+ pin->store.a = 0;
+ pin->store.en = 0;
+ pin->store.mux = 0;
+
+ pin->mux_supported = false;
+}
+
+void dal_hw_gpio_destruct(
+ struct hw_gpio *pin)
+{
+ ASSERT(!pin->base.opened);
+}
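The value of this base implementation is the store/restore bracket: open() snapshots MASK/A/EN before reprogramming the pin and close() puts them back, so a borrowed pin never leaks state. A minimal sketch of the intended flow (illustrative only; assumes the derived pin's register set has already been defined):

static void example_toggle(struct hw_gpio_pin *pin)
{
	/* open() saves MASK/A/EN, then programs the requested mode */
	if (!dal_hw_gpio_open(pin, GPIO_MODE_OUTPUT))
		return;

	dal_hw_gpio_set_value(pin, 1);	/* output mode drives the A bit */
	dal_hw_gpio_set_value(pin, 0);

	/* close() restores the saved register values */
	dal_hw_gpio_close(pin);
}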
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h
new file mode 100644
index 000000000000..bca0cef18ff9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_GPIO_H__
+#define __DAL_HW_GPIO_H__
+
+#include "gpio_regs.h"
+
+#define FROM_HW_GPIO_PIN(ptr) \
+ container_of((ptr), struct hw_gpio, base)
+
+struct addr_mask {
+ uint32_t addr;
+ uint32_t mask;
+};
+
+struct hw_gpio_pin {
+ const struct hw_gpio_pin_funcs *funcs;
+ enum gpio_id id;
+ uint32_t en;
+ enum gpio_mode mode;
+ bool opened;
+ struct dc_context *ctx;
+};
+
+struct hw_gpio_pin_funcs {
+ void (*destroy)(
+ struct hw_gpio_pin **ptr);
+ bool (*open)(
+ struct hw_gpio_pin *pin,
+ enum gpio_mode mode);
+ enum gpio_result (*get_value)(
+ const struct hw_gpio_pin *pin,
+ uint32_t *value);
+ enum gpio_result (*set_value)(
+ const struct hw_gpio_pin *pin,
+ uint32_t value);
+ enum gpio_result (*set_config)(
+ struct hw_gpio_pin *pin,
+ const struct gpio_config_data *config_data);
+ enum gpio_result (*change_mode)(
+ struct hw_gpio_pin *pin,
+ enum gpio_mode mode);
+ void (*close)(
+ struct hw_gpio_pin *pin);
+};
+
+
+struct hw_gpio;
+
+/* Register indices are represented by member variables
+ * and are to be filled in by constructors of derived classes.
+ * These members permit the use of common code
+ * for programming registers, where the sequence is the same
+ * but register sets are different.
+ * Some GPIOs have a HW mux which allows choosing
+ * the source of the signal in HW mode. */
+
+struct hw_gpio_pin_reg {
+ struct addr_mask DC_GPIO_DATA_MASK;
+ struct addr_mask DC_GPIO_DATA_A;
+ struct addr_mask DC_GPIO_DATA_EN;
+ struct addr_mask DC_GPIO_DATA_Y;
+};
+
+struct hw_gpio_mux_reg {
+ struct addr_mask GPIO_MUX_CONTROL;
+ struct addr_mask GPIO_MUX_STEREO_SEL;
+};
+
+struct hw_gpio {
+ struct hw_gpio_pin base;
+
+ /* variables to save register values */
+ struct {
+ uint32_t mask;
+ uint32_t a;
+ uint32_t en;
+ uint32_t mux;
+ } store;
+
+ /* GPIO MUX support */
+ bool mux_supported;
+ const struct gpio_registers *regs;
+};
+
+#define HW_GPIO_FROM_BASE(hw_gpio_pin) \
+ container_of((hw_gpio_pin), struct hw_gpio, base)
+
+void dal_hw_gpio_construct(
+ struct hw_gpio *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx);
+
+bool dal_hw_gpio_open(
+ struct hw_gpio_pin *pin,
+ enum gpio_mode mode);
+
+enum gpio_result dal_hw_gpio_get_value(
+ const struct hw_gpio_pin *pin,
+ uint32_t *value);
+
+enum gpio_result dal_hw_gpio_config_mode(
+ struct hw_gpio *pin,
+ enum gpio_mode mode);
+
+void dal_hw_gpio_destruct(
+ struct hw_gpio *pin);
+
+enum gpio_result dal_hw_gpio_set_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t value);
+
+enum gpio_result dal_hw_gpio_change_mode(
+ struct hw_gpio_pin *ptr,
+ enum gpio_mode mode);
+
+void dal_hw_gpio_close(
+ struct hw_gpio_pin *ptr);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
new file mode 100644
index 000000000000..784feccc5853
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/gpio_types.h"
+#include "hw_gpio.h"
+#include "hw_hpd.h"
+
+#include "reg_helper.h"
+#include "hpd_regs.h"
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hpd->shifts->field_name, hpd->masks->field_name
+
+#define CTX \
+ hpd->base.base.ctx
+#define REG(reg)\
+ (hpd->regs->reg)
+
+static void dal_hw_hpd_construct(
+ struct hw_hpd *pin,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_gpio_construct(&pin->base, id, en, ctx);
+}
+
+static void dal_hw_hpd_destruct(
+ struct hw_hpd *pin)
+{
+ dal_hw_gpio_destruct(&pin->base);
+}
+
+
+static void destruct(
+ struct hw_hpd *hpd)
+{
+ dal_hw_hpd_destruct(hpd);
+}
+
+static void destroy(
+ struct hw_gpio_pin **ptr)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(*ptr);
+
+ destruct(hpd);
+
+ kfree(hpd);
+
+ *ptr = NULL;
+}
+
+static enum gpio_result get_value(
+ const struct hw_gpio_pin *ptr,
+ uint32_t *value)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
+ uint32_t hpd_delayed = 0;
+
+ /* in Interrupt mode we ask for the SENSE bit */
+
+ if (ptr->mode == GPIO_MODE_INTERRUPT) {
+
+ REG_GET(int_status,
+ DC_HPD_SENSE_DELAYED, &hpd_delayed);
+
+ *value = hpd_delayed;
+ return GPIO_RESULT_OK;
+ }
+
+ /* in any other mode, operate as a normal GPIO */
+
+ return dal_hw_gpio_get_value(ptr, value);
+}
+
+static enum gpio_result set_config(
+ struct hw_gpio_pin *ptr,
+ const struct gpio_config_data *config_data)
+{
+ struct hw_hpd *hpd = HW_HPD_FROM_BASE(ptr);
+
+ if (!config_data)
+ return GPIO_RESULT_INVALID_DATA;
+
+ REG_UPDATE_2(toggle_filt_cntl,
+ DC_HPD_CONNECT_INT_DELAY, config_data->config.hpd.delay_on_connect / 10,
+ DC_HPD_DISCONNECT_INT_DELAY, config_data->config.hpd.delay_on_disconnect / 10);
+
+ return GPIO_RESULT_OK;
+}
+
+static const struct hw_gpio_pin_funcs funcs = {
+ .destroy = destroy,
+ .open = dal_hw_gpio_open,
+ .get_value = get_value,
+ .set_value = dal_hw_gpio_set_value,
+ .set_config = set_config,
+ .change_mode = dal_hw_gpio_change_mode,
+ .close = dal_hw_gpio_close,
+};
+
+static void construct(
+ struct hw_hpd *hpd,
+ enum gpio_id id,
+ uint32_t en,
+ struct dc_context *ctx)
+{
+ dal_hw_hpd_construct(hpd, id, en, ctx);
+ hpd->base.base.funcs = &funcs;
+}
+
+struct hw_gpio_pin *dal_hw_hpd_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en)
+{
+ struct hw_hpd *hpd;
+
+ if (id != GPIO_ID_HPD) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ if ((en < GPIO_HPD_MIN) || (en > GPIO_HPD_MAX)) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ hpd = kzalloc(sizeof(struct hw_hpd), GFP_KERNEL);
+ if (!hpd) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(hpd, id, en, ctx);
+ return &hpd->base.base;
+}
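The HPD pin only overrides get_value(): in interrupt mode it returns the debounced DC_HPD_SENSE_DELAYED bit instead of the raw GPIO Y value. A minimal sketch of a connect check through the generic interface (illustrative only; the helper name is an assumption):

static bool example_hpd_is_connected(struct hw_gpio_pin *pin)
{
	uint32_t sense = 0;

	if (!pin->funcs->open(pin, GPIO_MODE_INTERRUPT))
		return false;

	/* in interrupt mode this reads DC_HPD_SENSE_DELAYED */
	pin->funcs->get_value(pin, &sense);
	pin->funcs->close(pin);

	return sense != 0;
}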
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
new file mode 100644
index 000000000000..4ab7a208f781
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_HPD_H__
+#define __DAL_HW_HPD_H__
+
+#include "hpd_regs.h"
+
+struct hw_hpd {
+ struct hw_gpio base;
+ const struct hpd_registers *regs;
+ const struct hpd_sh_mask *shifts;
+ const struct hpd_sh_mask *masks;
+};
+
+#define HW_HPD_FROM_BASE(hw_gpio) \
+ container_of((HW_GPIO_FROM_BASE(hw_gpio)), struct hw_hpd, base)
+
+struct hw_gpio_pin *dal_hw_hpd_create(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
new file mode 100644
index 000000000000..0ae8ace25739
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by the header of this unit
+ */
+#include "include/gpio_types.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "hw_translate.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce80/hw_translate_dce80.h"
+#include "dce110/hw_translate_dce110.h"
+#include "dce120/hw_translate_dce120.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/hw_translate_dcn10.h"
+#endif
+
+#include "diagnostics/hw_translate_diag.h"
+
+/*
+ * This unit
+ */
+
+bool dal_hw_translate_init(
+ struct hw_translate *translate,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment)
+{
+ if (IS_FPGA_MAXIMUS_DC(dce_environment)) {
+ dal_hw_translate_diag_fpga_init(translate);
+ return true;
+ }
+
+ switch (dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ dal_hw_translate_dce80_init(translate);
+ return true;
+ case DCE_VERSION_10_0:
+ case DCE_VERSION_11_0:
+ case DCE_VERSION_11_2:
+ dal_hw_translate_dce110_init(translate);
+ return true;
+ case DCE_VERSION_12_0:
+ dal_hw_translate_dce120_init(translate);
+ return true;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ dal_hw_translate_dcn10_init(translate);
+ return true;
+#endif
+
+ default:
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+}
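hw_translate provides the inverse pair of lookups used by the GPIO service: offset_to_id() maps a raw register offset and mask to a logical GPIO, and id_to_offset() recovers the per-ASIC register info for a logical (id, en) pair. A minimal sketch (illustrative only; the local names are assumptions):

static bool example_lookup(struct hw_translate *tr,
	uint32_t offset, uint32_t mask)
{
	enum gpio_id id;
	uint32_t en;
	struct gpio_pin_info info;

	if (!tr->funcs->offset_to_id(offset, mask, &id, &en))
		return false;	/* register pair is not a known GPIO */

	/* reverse mapping: per-ASIC register info for (id, en) */
	return tr->funcs->id_to_offset(id, en, &info);
}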
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h
new file mode 100644
index 000000000000..3a7d89ca1605
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_TRANSLATE_H__
+#define __DAL_HW_TRANSLATE_H__
+
+struct hw_translate_funcs {
+ bool (*offset_to_id)(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en);
+ bool (*id_to_offset)(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info);
+};
+
+struct hw_translate {
+ const struct hw_translate_funcs *funcs;
+};
+
+bool dal_hw_translate_init(
+ struct hw_translate *translate,
+ enum dce_version dce_version,
+ enum dce_environment dce_environment);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
new file mode 100644
index 000000000000..55603400acd9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -0,0 +1,78 @@
+#
+# Makefile for the 'i2c' sub-component of DAL.
+# It provides control and status of the adapter's HW i2c engine.
+
+I2CAUX = aux_engine.o engine_base.o i2caux.o i2c_engine.o \
+ i2c_generic_hw_engine.o i2c_hw_engine.o i2c_sw_engine.o
+
+AMD_DAL_I2CAUX = $(addprefix $(AMDDALPATH)/dc/i2caux/,$(I2CAUX))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX)
+
+###############################################################################
+# DCE 8x family
+###############################################################################
+I2CAUX_DCE80 = i2caux_dce80.o i2c_hw_engine_dce80.o \
+ i2c_sw_engine_dce80.o
+
+AMD_DAL_I2CAUX_DCE80 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce80/,$(I2CAUX_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE80)
+
+###############################################################################
+# DCE 100 family
+###############################################################################
+I2CAUX_DCE100 = i2caux_dce100.o
+
+AMD_DAL_I2CAUX_DCE100 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce100/,$(I2CAUX_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE100)
+
+###############################################################################
+# DCE 110 family
+###############################################################################
+I2CAUX_DCE110 = i2caux_dce110.o i2c_sw_engine_dce110.o i2c_hw_engine_dce110.o \
+ aux_engine_dce110.o
+
+AMD_DAL_I2CAUX_DCE110 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce110/,$(I2CAUX_DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE110)
+
+###############################################################################
+# DCE 112 family
+###############################################################################
+I2CAUX_DCE112 = i2caux_dce112.o
+
+AMD_DAL_I2CAUX_DCE112 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce112/,$(I2CAUX_DCE112))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
+
+###############################################################################
+# DCN 1.0 family
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+I2CAUX_DCN1 = i2caux_dcn10.o
+
+AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCN1)
+endif
+
+###############################################################################
+# DCE 120 family
+###############################################################################
+I2CAUX_DCE120 = i2caux_dce120.o
+
+AMD_DAL_I2CAUX_DCE120 = $(addprefix $(AMDDALPATH)/dc/i2caux/dce120/,$(I2CAUX_DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE120)
+
+###############################################################################
+# Diagnostics on FPGA
+###############################################################################
+I2CAUX_DIAG = i2caux_diag.o
+
+AMD_DAL_I2CAUX_DIAG = $(addprefix $(AMDDALPATH)/dc/i2caux/diagnostics/,$(I2CAUX_DIAG))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DIAG)
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
new file mode 100644
index 000000000000..fc7a7d4ebca5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by the header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "aux_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "include/link_service_types.h"
+
+/*
+ * This unit
+ */
+
+enum {
+ AUX_INVALID_REPLY_RETRY_COUNTER = 1,
+ AUX_TIMED_OUT_RETRY_COUNTER = 2,
+ AUX_DEFER_RETRY_COUNTER = 6
+};
+
+#define FROM_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine, base)
+
+enum i2caux_engine_type dal_aux_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_AUX;
+}
+
+bool dal_aux_engine_acquire(
+ struct engine *engine,
+ struct ddc *ddc)
+{
+ struct aux_engine *aux_engine = FROM_ENGINE(engine);
+
+ enum gpio_result result;
+ if (aux_engine->funcs->is_engine_available) {
+ /* check whether SW can use the engine */
+ if (!aux_engine->funcs->is_engine_available(aux_engine)) {
+ return false;
+ }
+ }
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ if (!aux_engine->funcs->acquire_engine(aux_engine)) {
+ dal_ddc_close(ddc);
+ return false;
+ }
+
+ engine->ddc = ddc;
+
+ return true;
+}
+
+struct read_command_context {
+ uint8_t *buffer;
+ uint32_t current_read_length;
+ uint32_t offset;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t invalid_reply_retry_aux_on_ack;
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+static void process_read_reply(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->defer_retry_aux = 0;
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->transaction_complete = true;
+ ctx->operation_succeeded = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static void process_read_request(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else {
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->buffer;
+
+ process_read_reply(engine, ctx);
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here */
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static bool read_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct read_command_context ctx;
+
+ ctx.buffer = request->payload.data;
+ ctx.current_read_length = request->payload.length;
+ ctx.offset = 0;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.invalid_reply_retry_aux_on_ack = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ do {
+ memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
+
+ ctx.request.data = ctx.buffer + ctx.offset;
+ ctx.request.length = ctx.current_read_length;
+
+ process_read_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ return ctx.operation_succeeded;
+}
+
+struct write_command_context {
+ bool mot;
+
+ uint8_t *buffer;
+ uint32_t current_write_length;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t max_defer_retry;
+ uint32_t ack_m_retry;
+
+ uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+static void process_write_reply(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->operation_succeeded = true;
+
+ if (ctx->returned_byte) {
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ctx->current_write_length = 0;
+
+ ++ctx->ack_m_retry;
+
+ if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(300);
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->defer_retry_aux = 0;
+ ctx->ack_m_retry = 0;
+ ctx->transaction_complete = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+ ctx->current_write_length = 0;
+
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static void process_write_request(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->reply_data;
+
+ process_write_reply(engine, ctx);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here */
+ }
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+
+static bool write_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct write_command_context ctx;
+
+ ctx.mot = middle_of_transaction;
+ ctx.buffer = request->payload.data;
+ ctx.current_write_length = request->payload.length;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.ack_m_retry = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ ctx.max_defer_retry =
+ (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
+ engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
+
+ do {
+ ctx.request.data = ctx.buffer;
+ ctx.request.length = ctx.current_write_length;
+
+ process_write_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ return ctx.operation_succeeded;
+}
+
+static bool end_of_transaction_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request)
+{
+ struct i2caux_transaction_request dummy_request;
+ uint8_t dummy_data;
+
+ /* [tcheng] We only need to send the stop (read with MOT = 0)
+ * for I2C-over-Aux, not native AUX */
+
+ if (request->payload.address_space !=
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
+ return false;
+
+ dummy_request.operation = request->operation;
+ dummy_request.payload.address_space = request->payload.address_space;
+ dummy_request.payload.address = request->payload.address;
+
+ /*
+ * Add a dummy byte due to some receiver quirk
+ * where one byte is sent along with MOT = 0.
+ * Ideally this should be 0.
+ */
+
+ dummy_request.payload.length = 0;
+ dummy_request.payload.data = &dummy_data;
+
+ if (request->operation == I2CAUX_TRANSACTION_READ)
+ return read_command(engine, &dummy_request, false);
+ else
+ return write_command(engine, &dummy_request, false);
+
+ /* according to Syed, DoDummyMOT is not needed now */
+}
+
+bool dal_aux_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct aux_engine *aux_engine = FROM_ENGINE(engine);
+
+ bool result;
+ bool mot_used = true;
+
+ switch (request->operation) {
+ case I2CAUX_TRANSACTION_READ:
+ result = read_command(aux_engine, request, mot_used);
+ break;
+ case I2CAUX_TRANSACTION_WRITE:
+ result = write_command(aux_engine, request, mot_used);
+ break;
+ default:
+ result = false;
+ }
+
+ /* [tcheng]
+ * need to send a stop for the last transaction to free up the AUX;
+ * if the above command fails, this is treated as the last transaction */
+
+ if (!middle_of_transaction || !result)
+ end_of_transaction_command(aux_engine, request);
+
+ /* mask AUX interrupt */
+
+ return result;
+}
+
+void dal_aux_engine_construct(
+ struct aux_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2caux_construct_engine(&engine->base, ctx);
+ engine->delay = 0;
+ engine->max_defer_write_retry = 0;
+}
+
+void dal_aux_engine_destruct(
+ struct aux_engine *engine)
+{
+ dal_i2caux_destruct_engine(&engine->base);
+}
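dal_aux_engine_submit_request() always issues the transaction with MOT asserted; only when the caller marks the end of a chain (or the command fails) does end_of_transaction_command() send the zero-length read with MOT = 0 that releases the I2C-over-AUX bus. A minimal sketch of a two-part chained I2C-over-AUX read (illustrative only; the local names are assumptions):

static bool example_chained_i2c_read(struct engine *engine,
	struct i2caux_transaction_request *first,
	struct i2caux_transaction_request *last)
{
	/* middle of transaction: MOT stays asserted, no stop yet */
	if (!dal_aux_engine_submit_request(engine, first, true))
		return false;	/* on failure the stop is sent automatically */

	/* final request: the engine follows up with the MOT = 0 stop */
	return dal_aux_engine_submit_request(engine, last, false);
}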
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
new file mode 100644
index 000000000000..8e71324ccb10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/aux_engine.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_H__
+#define __DAL_AUX_ENGINE_H__
+
+enum aux_transaction_type {
+ AUX_TRANSACTION_TYPE_DP,
+ AUX_TRANSACTION_TYPE_I2C
+};
+
+struct aux_request_transaction_data {
+ enum aux_transaction_type type;
+ enum i2caux_transaction_action action;
+ /* 20-bit AUX channel transaction address */
+ uint32_t address;
+ /* delay, in 100-microsecond units */
+ uint8_t delay;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum aux_transaction_reply {
+ AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
+ AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+
+ AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
+ AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
+ AUX_TRANSACTION_REPLY_I2C_DEFER = 0x20,
+
+ AUX_TRANSACTION_REPLY_INVALID = 0xFF
+};
+
+struct aux_reply_transaction_data {
+ enum aux_transaction_reply status;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum aux_channel_operation_result {
+ AUX_CHANNEL_OPERATION_SUCCEEDED,
+ AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN,
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY,
+ AUX_CHANNEL_OPERATION_FAILED_TIMEOUT
+};
+
+struct aux_engine;
+
+struct aux_engine_funcs {
+ void (*destroy)(
+ struct aux_engine **ptr);
+ bool (*acquire_engine)(
+ struct aux_engine *engine);
+ void (*configure)(
+ struct aux_engine *engine,
+ union aux_config cfg);
+ void (*submit_channel_request)(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply);
+ enum aux_channel_operation_result (*get_channel_status)(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes);
+ bool (*is_engine_available) (
+ struct aux_engine *engine);
+};
+
+struct aux_engine {
+ struct engine base;
+ const struct aux_engine_funcs *funcs;
+ /* following values are expressed in milliseconds */
+ uint32_t delay;
+ uint32_t max_defer_write_retry;
+
+ bool acquire_reset;
+};
+
+void dal_aux_engine_construct(
+ struct aux_engine *engine,
+ struct dc_context *ctx);
+
+void dal_aux_engine_destruct(
+ struct aux_engine *engine);
+bool dal_aux_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+bool dal_aux_engine_acquire(
+ struct engine *ptr,
+ struct ddc *ddc);
+enum i2caux_engine_type dal_aux_engine_get_engine_type(
+ const struct engine *engine);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
new file mode 100644
index 000000000000..e8d3781deaed
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2c_hw_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "dce/dce_10_0_d.h"
+#include "dce/dce_10_0_sh_mask.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dce100_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+static const struct dce110_i2c_hw_engine_registers dce100_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE100(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE100(_MASK)
+};
+
+struct i2caux *dal_i2caux_dce100_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce100_aux_regs,
+ dce100_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
new file mode 100644
index 000000000000..2b508d3e0ef4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce100/i2caux_dce100.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE100_H__
+#define __DAL_I2C_AUX_DCE100_H__
+
+struct i2caux *dal_i2caux_dce100_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCE100_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
new file mode 100644
index 000000000000..81f9f3e34c10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by the header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../aux_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "aux_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+#include "dce/dce_11_0_sh_mask.h"
+
+#define CTX \
+ aux110->base.base.ctx
+#define REG(reg_name)\
+ (aux110->regs->reg_name)
+#include "reg_helper.h"
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct aux_engine *'
+ * to 'struct aux_engine_dce110 *'
+ */
+#define FROM_AUX_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine_dce110, base)
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct aux_engine_dce110 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
+
+static void release_engine(
+ struct engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_ENGINE(engine);
+
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+}
+
+static void destruct(
+ struct aux_engine_dce110 *engine);
+
+static void destroy(
+ struct aux_engine **aux_engine)
+{
+ struct aux_engine_dce110 *engine = FROM_AUX_ENGINE(*aux_engine);
+
+ destruct(engine);
+
+ kfree(engine);
+
+ *aux_engine = NULL;
+}
+
+#define SW_CAN_ACCESS_AUX 1
+#define DMCU_CAN_ACCESS_AUX 2
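+
+/*
+ * Assumed meaning of the AUX_REG_RW_CNTL_STATUS arbitration values, as
+ * used by the helpers below: 1 means driver SW currently owns the AUX
+ * engine, 2 means the DMCU firmware owns it (in which case SW backs off).
+ */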
+
+static bool is_engine_available(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field != DMCU_CAN_ACCESS_AUX);
+}
+
+static bool acquire_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+ if (field == DMCU_CAN_ACCESS_AUX)
+ return false;
+ /* enable AUX before request SW to access AUX */
+ value = REG_READ(AUX_CONTROL);
+ field = get_reg_field_value(value,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (field == 0) {
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (REG(AUX_RESET_MASK)) {
+			/* reset DP_AUX block as part of the enable sequence */
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_RESET);
+ }
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ if (REG(AUX_RESET_MASK)) {
+			/* poll HW to make sure the reset is done */
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
+ 1, 11);
+
+ set_reg_field_value(
+ value,
+ 0,
+ AUX_CONTROL,
+ AUX_RESET);
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
+ 1, 11);
+ }
+ } /*if (field)*/
+
+ /* request SW to access AUX */
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
+
+ value = REG_READ(AUX_ARB_CONTROL);
+ field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field == SW_CAN_ACCESS_AUX);
+}
+
+#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
+ ((command) | ((0xF0000 & (address)) >> 16))
+
+#define COMPOSE_AUX_SW_DATA_8_15(address) \
+ ((0xFF00 & (address)) >> 8)
+
+#define COMPOSE_AUX_SW_DATA_0_7(address) \
+ (0xFF & (address))
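+
+/*
+ * Worked example (hypothetical values) of how the macros above split a
+ * request into the first bytes of the AUX SW data buffer: for a DPCD
+ * read of 16 bytes at address 0x00205 with action bits 'action',
+ *   byte 0 = COMPOSE_AUX_SW_DATA_16_20(action, 0x00205) = action | 0x0
+ *   byte 1 = COMPOSE_AUX_SW_DATA_8_15(0x00205)          = 0x02
+ *   byte 2 = COMPOSE_AUX_SW_DATA_0_7(0x00205)           = 0x05
+ *   byte 3 = length - 1                                 = 15
+ * and, since this is a read, AUX_SW_WR_BYTES is programmed to 4
+ * (header + 20-bit address + length byte); see submit_channel_request().
+ */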
+
+static void submit_channel_request(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t value;
+ uint32_t length;
+
+ bool is_write =
+ ((request->type == AUX_TRANSACTION_TYPE_DP) &&
+ (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
+ ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+ ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+ /* bug? why does AUXN update IMPCAL_ENABLE and OVERRIDE_ENABLE one by one
+ * while AUXP only toggles OVERRIDE_ENABLE? */
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+
+ /* set the delay and the number of bytes to write */
+
+ /* The length includes
+ * the 4-bit header and the 20-bit address
+ * (that is 3 bytes).
+ * If the requested length is non-zero, an additional byte
+ * specifying the length is required. */
+
+ length = request->length ? 4 : 3;
+ if (is_write)
+ length += request->length;
+
+ REG_UPDATE_2(AUX_SW_CONTROL,
+ AUX_SW_START_DELAY, request->delay,
+ AUX_SW_WR_BYTES, length);
+
+ /* program action and address and payload data (if 'is_write') */
+ value = REG_UPDATE_4(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_DATA_RW, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
+
+ value = REG_SET_2(AUX_SW_DATA, value,
+ AUX_SW_AUTOINCREMENT_DISABLE, 0,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
+
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
+
+ if (request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->length - 1);
+ }
+
+ if (is_write) {
+ /* Load the HW buffer with the data to be sent.
+ * This is only relevant for a write operation.
+ * For a read, the received data will be
+ * processed in process_channel_reply(). */
+ uint32_t i = 0;
+
+ while (i < request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->data[i]);
+
+ ++i;
+ }
+ }
+
+ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+}
+
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ /* A register read is needed to get the number of bytes to process.
+ * Alternatively, this information could be passed in,
+ * but that would introduce unwanted coupling. */
+
+ uint32_t bytes_replied;
+ uint32_t value;
+
+ value = REG_GET(AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT, &bytes_replied);
+
+ if (bytes_replied) {
+ uint32_t reply_result;
+
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
+
+ REG_GET(AUX_SW_DATA,
+ AUX_SW_DATA, &reply_result);
+
+ reply_result = reply_result >> 4;
+
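+ /* The upper nibble of the first returned byte carries the AUX reply
+ * command (per the DP AUX reply encoding assumed here):
+ * 0 = AUX_ACK, 1 = AUX_NACK, 2 = AUX_DEFER,
+ * 4 = AUX_ACK with I2C_NACK, 8 = AUX_ACK with I2C_DEFER,
+ * which is exactly the mapping handled by the switch below. */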
+ switch (reply_result) {
+ case 0: /* ACK */ {
+ uint32_t i = 0;
+
+ /* first byte was already used
+ * to get the command status */
+ --bytes_replied;
+
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
+
+ REG_GET(AUX_SW_DATA,
+ AUX_SW_DATA, &aux_sw_data_val);
+
+ reply->data[i] = aux_sw_data_val;
+ ++i;
+ }
+
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+ }
+ break;
+ case 1: /* NACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+ break;
+ case 2: /* DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
+ break;
+ case 4: /* AUX ACK / I2C NACK */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
+ break;
+ case 8: /* AUX ACK / I2C DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
+ break;
+ default:
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ }
+ } else {
+ /* Error case: the upper layer should not call this function
+ * when the number of bytes in the reply is 0,
+ * because an error was surely asserted
+ * and should have been handled already.
+ * For the hot-plug (disconnect) case, however, this can happen. */
+ if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ ASSERT_CRITICAL(false);
+ }
+}
+
+static enum aux_channel_operation_result get_channel_status(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value;
+
+ if (returned_bytes == NULL) {
+ /*caller pass NULL pointer*/
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
+ }
+ *returned_bytes = 0;
+
+ /* poll to make sure that SW_DONE is asserted */
+ value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ 10, aux110->timeout_period/10);
+
+ /* Note that the following bits are set in AUX_SW_STATUS
+ * during CTS 4.2.1.2 (FW 3.3.1):
+ * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+ * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
+ *
+ * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
+ * HW debugging bit and should be ignored. */
+ if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
+ if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+
+ else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
+ (value &
+ AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+
+ *returned_bytes = get_reg_field_value(value,
+ AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT);
+
+ if (*returned_bytes == 0)
+ return
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+ else {
+ *returned_bytes -= 1;
+ return AUX_CHANNEL_OPERATION_SUCCEEDED;
+ }
+ } else {
+ /*time_elapsed >= aux_engine->timeout_period */
+ if (!(value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ ASSERT_CRITICAL(false);
+
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+ }
+}
+
+static const struct aux_engine_funcs aux_engine_funcs = {
+ .destroy = destroy,
+ .acquire_engine = acquire_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .get_channel_status = get_channel_status,
+ .is_engine_available = is_engine_available,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .submit_request = dal_aux_engine_submit_request,
+ .get_engine_type = dal_aux_engine_get_engine_type,
+ .acquire = dal_aux_engine_acquire,
+};
+
+static void construct(
+ struct aux_engine_dce110 *engine,
+ const struct aux_engine_dce110_init_data *aux_init_data)
+{
+ dal_aux_engine_construct(&engine->base, aux_init_data->ctx);
+ engine->base.base.funcs = &engine_funcs;
+ engine->base.funcs = &aux_engine_funcs;
+
+ engine->timeout_period = aux_init_data->timeout_period;
+ engine->regs = aux_init_data->regs;
+}
+
+static void destruct(
+ struct aux_engine_dce110 *engine)
+{
+ struct aux_engine_dce110 *aux110 = engine;
+ /* temporary workaround, TODO */
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_DMCU_DONE_USING_AUX_REG, 1);
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+ dal_aux_engine_destruct(&engine->base);
+}
+
+struct aux_engine *dal_aux_engine_dce110_create(
+ const struct aux_engine_dce110_init_data *aux_init_data)
+{
+ struct aux_engine_dce110 *engine;
+
+ if (!aux_init_data) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(*engine), GFP_KERNEL);
+
+ if (!engine) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(engine, aux_init_data);
+ return &engine->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
new file mode 100644
index 000000000000..85ee82162590
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_DCE110_H__
+#define __DAL_AUX_ENGINE_DCE110_H__
+
+#include "../aux_engine.h"
+
+#define AUX_COMMON_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_DATA, DP_AUX, id), \
+ SRI(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_STATUS, DP_AUX, id), \
+ SR(AUXN_IMPCAL), \
+ SR(AUXP_IMPCAL)
+
+struct dce110_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_ARB_CONTROL;
+ uint32_t AUX_SW_DATA;
+ uint32_t AUX_SW_CONTROL;
+ uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_SW_STATUS;
+ uint32_t AUXN_IMPCAL;
+ uint32_t AUXP_IMPCAL;
+
+ uint32_t AUX_RESET_MASK;
+};
+
+struct aux_engine_dce110 {
+ struct aux_engine base;
+ const struct dce110_aux_registers *regs;
+ struct {
+ uint32_t aux_control;
+ uint32_t aux_arb_control;
+ uint32_t aux_sw_data;
+ uint32_t aux_sw_control;
+ uint32_t aux_interrupt_control;
+ uint32_t aux_sw_status;
+ } addr;
+ uint32_t timeout_period;
+};
+
+struct aux_engine_dce110_init_data {
+ uint32_t engine_id;
+ uint32_t timeout_period;
+ struct dc_context *ctx;
+ const struct dce110_aux_registers *regs;
+};
+
+struct aux_engine *dal_aux_engine_dce110_create(
+ const struct aux_engine_dce110_init_data *aux_init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
new file mode 100644
index 000000000000..56e25b3d65fd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "include/logger_interface.h"
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_hw_engine.h"
+#include "../i2c_generic_hw_engine.h"
+/*
+ * Header of this unit
+ */
+
+#include "i2c_hw_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+#include "reg_helper.h"
+
+/*
+ * This unit
+ */
+
+enum dc_i2c_status {
+ DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+};
+
+enum dc_i2c_arbitration {
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
+};
+
+enum {
+ /* No timeout in HW
+ * (timeout implemented in SW by querying status) */
+ I2C_SETUP_TIME_LIMIT = 255,
+ I2C_HW_BUFFER_SIZE = 538
+};
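+
+/*
+ * I2C_HW_BUFFER_SIZE is the assumed capacity of the HW circular data
+ * buffer (slave address plus data, one byte per entry);
+ * get_hw_buffer_available_size() below reports how much of it is still
+ * free before GO is issued.
+ */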
+
+/*
+ * @brief
+ * Cast pointer to 'struct i2c_hw_engine *'
+ * to pointer to 'struct i2c_hw_engine_dce110 *'
+ */
+#define FROM_I2C_HW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_hw_engine_dce110, base)
+/*
+ * @brief
+ * Cast pointer to 'struct i2c_engine *'
+ * to pointer to 'struct i2c_hw_engine_dce110 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
+
+/*
+ * @brief
+ * Cast pointer to 'struct engine *'
+ * to pointer to 'struct i2c_hw_engine_dce110 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+#define CTX \
+ hw_engine->base.base.base.ctx
+
+#define REG(reg_name)\
+ (hw_engine->regs->reg_name)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hw_engine->i2c_shift->field_name, hw_engine->i2c_mask->field_name
+
+#include "reg_helper.h"
+
+static void disable_i2c_hw_engine(
+ struct i2c_hw_engine_dce110 *hw_engine)
+{
+ REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 0);
+}
+
+static void release_engine(
+ struct engine *engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_engine *base = NULL;
+ bool safe_to_reset;
+
+ base = &hw_engine->base.base;
+
+ /* Restore original HW engine speed */
+
+ base->funcs->set_speed(base, hw_engine->base.original_speed);
+
+ /* Release I2C */
+ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1);
+
+ /* Reset HW engine */
+ {
+ uint32_t i2c_sw_status = 0;
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+ /* if used by SW, safe to reset */
+ safe_to_reset = (i2c_sw_status == 1);
+ }
+
+ if (safe_to_reset)
+ REG_UPDATE_2(
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET, 1,
+ DC_I2C_SW_STATUS_RESET, 1);
+ else
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, 1);
+
+ /* HW I2c engine - clock gating feature */
+ if (!hw_engine->engine_keep_power_up_count)
+ disable_i2c_hw_engine(hw_engine);
+}
+
+static bool setup_engine(
+ struct i2c_engine *i2c_engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+
+ /* Program pin select */
+ REG_UPDATE_6(
+ DC_I2C_CONTROL,
+ DC_I2C_GO, 0,
+ DC_I2C_SOFT_RESET, 0,
+ DC_I2C_SEND_RESET, 0,
+ DC_I2C_SW_STATUS_RESET, 1,
+ DC_I2C_TRANSACTION_COUNT, 0,
+ DC_I2C_DDC_SELECT, hw_engine->engine_id);
+
+ /* Program time limit */
+ REG_UPDATE_N(
+ SETUP, 2,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT), I2C_SETUP_TIME_LIMIT,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE), 1);
+
+ /* Program HW priority:
+ * set to high - interrupt software I2C at any time;
+ * enable restart of SW I2C that was interrupted by HW;
+ * disable queuing of software while I2C is in use by HW */
+ REG_UPDATE_2(
+ DC_I2C_ARBITRATION,
+ DC_I2C_NO_QUEUED_SW_GO, 0,
+ DC_I2C_SW_PRIORITY, DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL);
+
+ return true;
+}
+
+static uint32_t get_speed(
+ const struct i2c_engine *i2c_engine)
+{
+ const struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+ uint32_t pre_scale = 0;
+
+ REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale);
+
+ /* [anaumov] it seems the following is unnecessary */
+ /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
+ return pre_scale ?
+ hw_engine->reference_frequency / pre_scale :
+ hw_engine->base.default_speed;
+}
+
+static void set_speed(
+ struct i2c_engine *i2c_engine,
+ uint32_t speed)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+
+ if (speed) {
+ if (hw_engine->i2c_mask->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
+ REG_UPDATE_N(
+ SPEED, 3,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
+ else
+ REG_UPDATE_N(
+ SPEED, 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), hw_engine->reference_frequency / speed,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
+ }
+}
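+
+/*
+ * Numerical sketch (hypothetical values) for set_speed() above: with
+ * reference_frequency = 24000 kHz and a requested speed of 100 kHz,
+ * DC_I2C_DDC1_PRESCALE is programmed to 24000 / 100 = 240, and since
+ * speed > 50 the START_STOP_TIMING_CNTL field (when present in the
+ * mask) is set to 2.
+ */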
+
+static inline void reset_hw_engine(struct engine *engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
+
+ REG_UPDATE_2(
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET, 1,
+ DC_I2C_SW_STATUS_RESET, 1);
+}
+
+static bool is_hw_busy(struct engine *engine)
+{
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_ENGINE(engine);
+ uint32_t i2c_sw_status = 0;
+
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
+ return false;
+
+ reset_hw_engine(engine);
+
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+ return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
+}
+
+
+#define STOP_TRANS_PREDICAT \
+ ((hw_engine->transaction_count == 3) || \
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) || \
+ (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ))
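+
+/*
+ * In other words (a reading of the predicate above, not a spec
+ * statement): a STOP is generated when this is the fourth queued
+ * transaction (transaction_count == 3), when the request is a plain
+ * I2C write (no MOT), or when the request is any I2C read.
+ */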
+
+#define SET_I2C_TRANSACTION(id) \
+ do { \
+ REG_UPDATE_N(DC_I2C_TRANSACTION##id, 5, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0), 1, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_START0), 1, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_STOP0), STOP_TRANS_PREDICAT ? 1:0, \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_RW0), (0 != (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)), \
+ FN(DC_I2C_TRANSACTION0, DC_I2C_COUNT0), length); \
+ if (STOP_TRANS_PREDICAT) \
+ last_transaction = true; \
+ } while (false)
+
+
+static bool process_transaction(
+ struct i2c_hw_engine_dce110 *hw_engine,
+ struct i2c_request_transaction_data *request)
+{
+ uint32_t length = request->length;
+ uint8_t *buffer = request->data;
+ uint32_t value = 0;
+
+ bool last_transaction = false;
+
+ struct dc_context *ctx = NULL;
+
+ ctx = hw_engine->base.base.base.ctx;
+
+
+
+ switch (hw_engine->transaction_count) {
+ case 0:
+ SET_I2C_TRANSACTION(0);
+ break;
+ case 1:
+ SET_I2C_TRANSACTION(1);
+ break;
+ case 2:
+ SET_I2C_TRANSACTION(2);
+ break;
+ case 3:
+ SET_I2C_TRANSACTION(3);
+ break;
+ default:
+ /* TODO Warning ? */
+ break;
+ }
+
+
+ /* Write the I2C address and I2C data
+ * into the hardware circular buffer, one byte per entry.
+ * As an example, the 7-bit I2C slave address used for reading
+ * DDC/EDID information from a CRT monitor is 0b1010001.
+ * For an I2C send operation, the LSB must be programmed to 0;
+ * for an I2C receive operation, the LSB must be programmed to 1. */
+ if (hw_engine->transaction_count == 0) {
+ value = REG_SET_4(DC_I2C_DATA, 0,
+ DC_I2C_DATA_RW, false,
+ DC_I2C_DATA, request->address,
+ DC_I2C_INDEX, 0,
+ DC_I2C_INDEX_WRITE, 1);
+ hw_engine->buffer_used_write = 0;
+ } else
+ value = REG_SET_2(DC_I2C_DATA, 0,
+ DC_I2C_DATA_RW, false,
+ DC_I2C_DATA, request->address);
+
+ hw_engine->buffer_used_write++;
+
+ if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
+ while (length) {
+ REG_SET_2(DC_I2C_DATA, value,
+ DC_I2C_INDEX_WRITE, 0,
+ DC_I2C_DATA, *buffer++);
+ hw_engine->buffer_used_write++;
+ --length;
+ }
+ }
+
+ ++hw_engine->transaction_count;
+ hw_engine->buffer_used_bytes += length + 1;
+
+ return last_transaction;
+}
+
+static void execute_transaction(
+ struct i2c_hw_engine_dce110 *hw_engine)
+{
+ REG_UPDATE_N(SETUP, 5,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY), 0,
+ FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY), 0);
+
+
+ REG_UPDATE_5(DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET, 0,
+ DC_I2C_SW_STATUS_RESET, 0,
+ DC_I2C_SEND_RESET, 0,
+ DC_I2C_GO, 0,
+ DC_I2C_TRANSACTION_COUNT, hw_engine->transaction_count - 1);
+
+ /* start I2C transfer */
+ REG_UPDATE(DC_I2C_CONTROL, DC_I2C_GO, 1);
+
+ /* all transactions were executed and the HW buffer became empty
+ * (even though this actually happens only when the status becomes DONE) */
+ hw_engine->transaction_count = 0;
+ hw_engine->buffer_used_bytes = 0;
+}
+
+static void submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request)
+{
+ request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ if (!process_transaction(FROM_I2C_ENGINE(engine), request))
+ return;
+
+ if (is_hw_busy(&engine->base)) {
+ request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ return;
+ }
+
+ execute_transaction(FROM_I2C_ENGINE(engine));
+}
+
+static void process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply)
+{
+ uint32_t length = reply->length;
+ uint8_t *buffer = reply->data;
+
+ struct i2c_hw_engine_dce110 *hw_engine =
+ FROM_I2C_ENGINE(engine);
+
+
+ REG_SET_3(DC_I2C_DATA, 0,
+ DC_I2C_INDEX, hw_engine->buffer_used_write,
+ DC_I2C_DATA_RW, 1,
+ DC_I2C_INDEX_WRITE, 1);
+
+ while (length) {
+ /* After reading the status,
+ * if the I2C operation executed successfully
+ * (i.e. DC_I2C_STATUS_DONE = 1), the data bytes should be
+ * read from the I2C circular data buffer. */
+
+ uint32_t i2c_data;
+
+ REG_GET(DC_I2C_DATA, DC_I2C_DATA, &i2c_data);
+ *buffer++ = i2c_data;
+
+ --length;
+ }
+}
+
+static enum i2c_channel_operation_result get_channel_status(
+ struct i2c_engine *i2c_engine,
+ uint8_t *returned_bytes)
+{
+ uint32_t i2c_sw_status = 0;
+ struct i2c_hw_engine_dce110 *hw_engine = FROM_I2C_ENGINE(i2c_engine);
+ uint32_t value =
+ REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status);
+
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_STOPPED_ON_NACK)
+ return I2C_CHANNEL_OPERATION_NO_RESPONSE;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_TIMEOUT)
+ return I2C_CHANNEL_OPERATION_TIMEOUT;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_ABORTED)
+ return I2C_CHANNEL_OPERATION_FAILED;
+ else if (value & hw_engine->i2c_mask->DC_I2C_SW_DONE)
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ /*
+ * This is the case when the HW engine was used for the communication;
+ * I2C_SW_STATUS can then be zero.
+ */
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+}
+
+static uint32_t get_hw_buffer_available_size(
+ const struct i2c_hw_engine *engine)
+{
+ return I2C_HW_BUFFER_SIZE -
+ FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
+}
+
+static uint32_t get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length)
+{
+ uint32_t speed = engine->base.funcs->get_speed(&engine->base);
+
+ uint32_t period_timeout;
+ uint32_t num_of_clock_stretches;
+
+ if (!speed)
+ return 0;
+
+ period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
+
+ num_of_clock_stretches = 1 + (length << 3) + 1;
+ num_of_clock_stretches +=
+ (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
+ (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
+
+ return period_timeout * num_of_clock_stretches;
+}
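+
+/*
+ * Rough trace of get_transaction_timeout() (hypothetical numbers): for
+ * a 2-byte transaction on an otherwise empty engine (no bytes in the
+ * HW buffer, no pending transactions), num_of_clock_stretches =
+ * 1 + (2 << 3) + 1 = 18, and the returned timeout is
+ * 18 * (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed.
+ */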
+
+static void destroy(
+ struct i2c_engine **i2c_engine)
+{
+ struct i2c_hw_engine_dce110 *engine_dce110 =
+ FROM_I2C_ENGINE(*i2c_engine);
+
+ dal_i2c_hw_engine_destruct(&engine_dce110->base);
+
+ kfree(engine_dce110);
+
+ *i2c_engine = NULL;
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .destroy = destroy,
+ .get_speed = get_speed,
+ .set_speed = set_speed,
+ .setup_engine = setup_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .get_channel_status = get_channel_status,
+ .acquire_engine = dal_i2c_hw_engine_acquire_engine,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_hw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_hw_engine_submit_request,
+};
+
+static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
+ .get_hw_buffer_available_size = get_hw_buffer_available_size,
+ .get_transaction_timeout = get_transaction_timeout,
+ .wait_on_operation_result = dal_i2c_hw_engine_wait_on_operation_result,
+};
+
+static void construct(
+ struct i2c_hw_engine_dce110 *hw_engine,
+ const struct i2c_hw_engine_dce110_create_arg *arg)
+{
+ uint32_t xtal_ref_div = 0;
+
+ dal_i2c_hw_engine_construct(&hw_engine->base, arg->ctx);
+
+ hw_engine->base.base.base.funcs = &engine_funcs;
+ hw_engine->base.base.funcs = &i2c_engine_funcs;
+ hw_engine->base.funcs = &i2c_hw_engine_funcs;
+ hw_engine->base.default_speed = arg->default_speed;
+
+ hw_engine->regs = arg->regs;
+ hw_engine->i2c_shift = arg->i2c_shift;
+ hw_engine->i2c_mask = arg->i2c_mask;
+
+ hw_engine->engine_id = arg->engine_id;
+
+ hw_engine->buffer_used_bytes = 0;
+ hw_engine->transaction_count = 0;
+ hw_engine->engine_keep_power_up_count = 1;
+
+
+ REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
+
+ if (xtal_ref_div == 0) {
+ dm_logger_write(
+ hw_engine->base.base.base.ctx->logger, LOG_WARNING,
+ "Invalid base timer divider [%s]\n",
+ __func__);
+ xtal_ref_div = 2;
+ }
+
+ /* Calculate the reference clock by dividing the original frequency
+ * by XTAL_REF_DIV.
+ * At the upper level, uint32_t reference_frequency =
+ * dal_i2caux_get_reference_clock(as) >> 1,
+ * which is already divided by 2. So we need to multiply by 2 to get
+ * the original reference clock from ppll_info.
+ */
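+ /* Example with hypothetical numbers: if the BIOS reference clock is
+ * 48000 kHz, the caller passes arg->reference_frequency = 24000
+ * (already divided by 2), and with XTAL_REF_DIV = 2 this yields
+ * reference_frequency = (24000 * 2) / 2 = 24000 kHz.
+ */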
+ hw_engine->reference_frequency =
+ (arg->reference_frequency * 2) / xtal_ref_div;
+}
+
+struct i2c_engine *dal_i2c_hw_engine_dce110_create(
+ const struct i2c_hw_engine_dce110_create_arg *arg)
+{
+ struct i2c_hw_engine_dce110 *engine_dce10;
+
+ if (!arg) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+ if (!arg->reference_frequency) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ engine_dce10 = kzalloc(sizeof(struct i2c_hw_engine_dce110),
+ GFP_KERNEL);
+
+ if (!engine_dce10) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(engine_dce10, arg);
+ return &engine_dce10->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
new file mode 100644
index 000000000000..5bb04085f670
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_HW_ENGINE_DCE110_H__
+#define __DAL_I2C_HW_ENGINE_DCE110_H__
+
+#define I2C_HW_ENGINE_COMMON_REG_LIST(id)\
+ SRI(SETUP, DC_I2C_DDC, id),\
+ SRI(SPEED, DC_I2C_DDC, id),\
+ SR(DC_I2C_ARBITRATION),\
+ SR(DC_I2C_CONTROL),\
+ SR(DC_I2C_SW_STATUS),\
+ SR(DC_I2C_TRANSACTION0),\
+ SR(DC_I2C_TRANSACTION1),\
+ SR(DC_I2C_TRANSACTION2),\
+ SR(DC_I2C_TRANSACTION3),\
+ SR(DC_I2C_DATA),\
+ SR(MICROSECOND_TIME_BASE_DIV)
+
+#define I2C_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_ENABLE, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_TIME_LIMIT, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_EN, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_DRIVE_EN, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_DATA_DRIVE_SEL, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_TRANSACTION_DELAY, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_INTRA_BYTE_DELAY, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_NO_QUEUED_SW_GO, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_SW_PRIORITY, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_SW_STATUS_RESET, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_GO, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_SEND_RESET, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_TRANSACTION_COUNT, mask_sh),\
+ I2C_SF(DC_I2C_CONTROL, DC_I2C_DDC_SELECT, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE, mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STOPPED_ON_NACK, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_TIMEOUT, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_ABORTED, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_DONE, mask_sh),\
+ I2C_SF(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP_ON_NACK0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_START0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_RW0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_STOP0, mask_sh),\
+ I2C_SF(DC_I2C_TRANSACTION0, DC_I2C_COUNT0, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_DATA_RW, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
+ I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
+ I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
+
+#define I2C_COMMON_MASK_SH_LIST_DCE100(mask_sh)\
+ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh)
+
+#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
+ I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
+ I2C_SF(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL, mask_sh)
+
+struct dce110_i2c_hw_engine_shift {
+ uint8_t DC_I2C_DDC1_ENABLE;
+ uint8_t DC_I2C_DDC1_TIME_LIMIT;
+ uint8_t DC_I2C_DDC1_DATA_DRIVE_EN;
+ uint8_t DC_I2C_DDC1_CLK_DRIVE_EN;
+ uint8_t DC_I2C_DDC1_DATA_DRIVE_SEL;
+ uint8_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
+ uint8_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
+ uint8_t DC_I2C_SW_DONE_USING_I2C_REG;
+ uint8_t DC_I2C_NO_QUEUED_SW_GO;
+ uint8_t DC_I2C_SW_PRIORITY;
+ uint8_t DC_I2C_SOFT_RESET;
+ uint8_t DC_I2C_SW_STATUS_RESET;
+ uint8_t DC_I2C_GO;
+ uint8_t DC_I2C_SEND_RESET;
+ uint8_t DC_I2C_TRANSACTION_COUNT;
+ uint8_t DC_I2C_DDC_SELECT;
+ uint8_t DC_I2C_DDC1_PRESCALE;
+ uint8_t DC_I2C_DDC1_THRESHOLD;
+ uint8_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
+ uint8_t DC_I2C_SW_STOPPED_ON_NACK;
+ uint8_t DC_I2C_SW_TIMEOUT;
+ uint8_t DC_I2C_SW_ABORTED;
+ uint8_t DC_I2C_SW_DONE;
+ uint8_t DC_I2C_SW_STATUS;
+ uint8_t DC_I2C_STOP_ON_NACK0;
+ uint8_t DC_I2C_START0;
+ uint8_t DC_I2C_RW0;
+ uint8_t DC_I2C_STOP0;
+ uint8_t DC_I2C_COUNT0;
+ uint8_t DC_I2C_DATA_RW;
+ uint8_t DC_I2C_DATA;
+ uint8_t DC_I2C_INDEX;
+ uint8_t DC_I2C_INDEX_WRITE;
+ uint8_t XTAL_REF_DIV;
+};
+
+struct dce110_i2c_hw_engine_mask {
+ uint32_t DC_I2C_DDC1_ENABLE;
+ uint32_t DC_I2C_DDC1_TIME_LIMIT;
+ uint32_t DC_I2C_DDC1_DATA_DRIVE_EN;
+ uint32_t DC_I2C_DDC1_CLK_DRIVE_EN;
+ uint32_t DC_I2C_DDC1_DATA_DRIVE_SEL;
+ uint32_t DC_I2C_DDC1_INTRA_TRANSACTION_DELAY;
+ uint32_t DC_I2C_DDC1_INTRA_BYTE_DELAY;
+ uint32_t DC_I2C_SW_DONE_USING_I2C_REG;
+ uint32_t DC_I2C_NO_QUEUED_SW_GO;
+ uint32_t DC_I2C_SW_PRIORITY;
+ uint32_t DC_I2C_SOFT_RESET;
+ uint32_t DC_I2C_SW_STATUS_RESET;
+ uint32_t DC_I2C_GO;
+ uint32_t DC_I2C_SEND_RESET;
+ uint32_t DC_I2C_TRANSACTION_COUNT;
+ uint32_t DC_I2C_DDC_SELECT;
+ uint32_t DC_I2C_DDC1_PRESCALE;
+ uint32_t DC_I2C_DDC1_THRESHOLD;
+ uint32_t DC_I2C_DDC1_START_STOP_TIMING_CNTL;
+ uint32_t DC_I2C_SW_STOPPED_ON_NACK;
+ uint32_t DC_I2C_SW_TIMEOUT;
+ uint32_t DC_I2C_SW_ABORTED;
+ uint32_t DC_I2C_SW_DONE;
+ uint32_t DC_I2C_SW_STATUS;
+ uint32_t DC_I2C_STOP_ON_NACK0;
+ uint32_t DC_I2C_START0;
+ uint32_t DC_I2C_RW0;
+ uint32_t DC_I2C_STOP0;
+ uint32_t DC_I2C_COUNT0;
+ uint32_t DC_I2C_DATA_RW;
+ uint32_t DC_I2C_DATA;
+ uint32_t DC_I2C_INDEX;
+ uint32_t DC_I2C_INDEX_WRITE;
+ uint32_t XTAL_REF_DIV;
+};
+
+struct dce110_i2c_hw_engine_registers {
+ uint32_t SETUP;
+ uint32_t SPEED;
+ uint32_t DC_I2C_ARBITRATION;
+ uint32_t DC_I2C_CONTROL;
+ uint32_t DC_I2C_SW_STATUS;
+ uint32_t DC_I2C_TRANSACTION0;
+ uint32_t DC_I2C_TRANSACTION1;
+ uint32_t DC_I2C_TRANSACTION2;
+ uint32_t DC_I2C_TRANSACTION3;
+ uint32_t DC_I2C_DATA;
+ uint32_t MICROSECOND_TIME_BASE_DIV;
+};
+
+struct i2c_hw_engine_dce110 {
+ struct i2c_hw_engine base;
+ const struct dce110_i2c_hw_engine_registers *regs;
+ const struct dce110_i2c_hw_engine_shift *i2c_shift;
+ const struct dce110_i2c_hw_engine_mask *i2c_mask;
+ struct {
+ uint32_t DC_I2C_DDCX_SETUP;
+ uint32_t DC_I2C_DDCX_SPEED;
+ } addr;
+ uint32_t engine_id;
+ /* expressed in kilohertz */
+ uint32_t reference_frequency;
+ /* number of bytes currently used in HW buffer */
+ uint32_t buffer_used_bytes;
+ /* number of bytes used for write transaction in HW buffer
+ * - this will be used as the index to read from*/
+ uint32_t buffer_used_write;
+ /* number of pending transactions (before GO) */
+ uint32_t transaction_count;
+ uint32_t engine_keep_power_up_count;
+};
+
+struct i2c_hw_engine_dce110_create_arg {
+ uint32_t engine_id;
+ uint32_t reference_frequency;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+ const struct dce110_i2c_hw_engine_registers *regs;
+ const struct dce110_i2c_hw_engine_shift *i2c_shift;
+ const struct dce110_i2c_hw_engine_mask *i2c_mask;
+};
+
+struct i2c_engine *dal_i2c_hw_engine_dce110_create(
+ const struct i2c_hw_engine_dce110_create_arg *arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
new file mode 100644
index 000000000000..3aa7f791e523
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_sw_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct i2c_sw_engine *'
+ * to 'struct i2c_sw_engine_dce110 *'
+ */
+#define FROM_I2C_SW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_sw_engine_dce110, base)
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_sw_engine_dce110 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_sw_engine_dce110 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+static void release_engine(
+ struct engine *engine)
+{
+}
+
+static void destruct(
+ struct i2c_sw_engine_dce110 *engine)
+{
+ dal_i2c_sw_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **engine)
+{
+ struct i2c_sw_engine_dce110 *sw_engine = FROM_I2C_ENGINE(*engine);
+
+ destruct(sw_engine);
+
+ kfree(sw_engine);
+
+ *engine = NULL;
+}
+
+static bool acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc_handle)
+{
+ return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .destroy = destroy,
+ .get_speed = dal_i2c_sw_engine_get_speed,
+ .set_speed = dal_i2c_sw_engine_set_speed,
+ .setup_engine = dal_i2c_engine_setup_i2c_engine,
+ .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
+ .process_channel_reply = dal_i2c_engine_process_channel_reply,
+ .get_channel_status = dal_i2c_sw_engine_get_channel_status,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_sw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_sw_engine_submit_request,
+};
+
+static void construct(
+ struct i2c_sw_engine_dce110 *engine_dce110,
+ const struct i2c_sw_engine_dce110_create_arg *arg_dce110)
+{
+ struct i2c_sw_engine_create_arg arg_base;
+
+ arg_base.ctx = arg_dce110->ctx;
+ arg_base.default_speed = arg_dce110->default_speed;
+
+ dal_i2c_sw_engine_construct(&engine_dce110->base, &arg_base);
+
+ /*struct engine struct engine_funcs*/
+ engine_dce110->base.base.base.funcs = &engine_funcs;
+ /*struct i2c_engine struct i2c_engine_funcs*/
+ engine_dce110->base.base.funcs = &i2c_engine_funcs;
+ engine_dce110->base.default_speed = arg_dce110->default_speed;
+ engine_dce110->engine_id = arg_dce110->engine_id;
+}
+
+struct i2c_engine *dal_i2c_sw_engine_dce110_create(
+ const struct i2c_sw_engine_dce110_create_arg *arg)
+{
+ struct i2c_sw_engine_dce110 *engine_dce110;
+
+ if (!arg) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ engine_dce110 = kzalloc(sizeof(struct i2c_sw_engine_dce110),
+ GFP_KERNEL);
+
+ if (!engine_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(engine_dce110, arg);
+ return &engine_dce110->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h
new file mode 100644
index 000000000000..c48c61f540a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_sw_engine_dce110.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_SW_ENGINE_DCE110_H__
+#define __DAL_I2C_SW_ENGINE_DCE110_H__
+
+struct i2c_sw_engine_dce110 {
+ struct i2c_sw_engine base;
+ uint32_t engine_id;
+};
+
+struct i2c_sw_engine_dce110_create_arg {
+ uint32_t engine_id;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+struct i2c_engine *dal_i2c_sw_engine_dce110_create(
+ const struct i2c_sw_engine_dce110_create_arg *arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
new file mode 100644
index 000000000000..2a047f8ca0e9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+/*
+ * Header of this unit
+ */
+#include "i2caux_dce110.h"
+
+#include "i2c_sw_engine_dce110.h"
+#include "i2c_hw_engine_dce110.h"
+#include "aux_engine_dce110.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+/* cast pointer to struct i2caux to pointer to struct i2caux_dce110 */
+#define FROM_I2C_AUX(ptr) \
+ container_of((ptr), struct i2caux_dce110, base)
+
+static void destruct(
+ struct i2caux_dce110 *i2caux_dce110)
+{
+ dal_i2caux_destruct(&i2caux_dce110->base);
+}
+
+static void destroy(
+ struct i2caux **i2c_engine)
+{
+ struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(*i2c_engine);
+
+ destruct(i2caux_dce110);
+
+ kfree(i2caux_dce110);
+
+ *i2c_engine = NULL;
+}
+
+static struct i2c_engine *acquire_i2c_hw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
+
+ struct i2c_engine *engine = NULL;
+ /* The generic HW engine is not used for EDID reads.
+ * It may be needed for an external I2C device, like a thermal chip;
+ * TODO: it will be implemented when needed.
+ * See the dce80 'non_generic' flag for the generic HW engine.
+ */
+
+ if (!ddc)
+ return NULL;
+
+ if (ddc->hw_info.hw_supported) {
+ enum gpio_ddc_line line = dal_ddc_get_line(ddc);
+
+ if (line < GPIO_DDC_LINE_COUNT)
+ engine = i2caux->i2c_hw_engines[line];
+ }
+
+ if (!engine)
+ return NULL;
+
+ if (!i2caux_dce110->i2c_hw_buffer_in_use &&
+ engine->base.funcs->acquire(&engine->base, ddc)) {
+ i2caux_dce110->i2c_hw_buffer_in_use = true;
+ return engine;
+ }
+
+ return NULL;
+}
+
+static void release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine)
+{
+ struct i2caux_dce110 *i2caux_dce110 = FROM_I2C_AUX(i2caux);
+
+ if (engine->funcs->get_engine_type(engine) ==
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
+ i2caux_dce110->i2c_hw_buffer_in_use = false;
+
+ dal_i2caux_release_engine(i2caux, engine);
+}
+
+static const enum gpio_ddc_line hw_ddc_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+};
+
+static const enum gpio_ddc_line hw_aux_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+};
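+
+/*
+ * One HW I2C engine and one AUX engine are created per DDC line listed
+ * above; both tables (i2c_hw_engines and aux_engines) are indexed by
+ * 'enum gpio_ddc_line' in dal_i2caux_dce110_construct() below.
+ */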
+
+/* function table */
+static const struct i2caux_funcs i2caux_funcs = {
+ .destroy = destroy,
+ .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
+ .release_engine = release_engine,
+ .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
+ .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
+};
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dce110_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+static const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
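+
+/*
+ * Note (based on the macro instance ids above): the AUX register sets
+ * are instanced DP_AUX0..DP_AUX5, while the I2C HW engine register sets
+ * are instanced DC_I2C_DDC1..DC_I2C_DDC6, so i2c_hw_engine_regs[i]
+ * corresponds to HW DDC instance i + 1.
+ */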
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+void dal_i2caux_dce110_construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx,
+ const struct dce110_aux_registers aux_regs[],
+ const struct dce110_i2c_hw_engine_registers i2c_hw_engine_regs[],
+ const struct dce110_i2c_hw_engine_shift *i2c_shift,
+ const struct dce110_i2c_hw_engine_mask *i2c_mask)
+{
+ uint32_t i = 0;
+ uint32_t reference_frequency = 0;
+ bool use_i2c_sw_engine = false;
+ struct i2caux *base = NULL;
+ /* TODO: For CZ bring-up, if dal_i2caux_get_reference_clock
+ * does not return 48KHz, we need to hard-code 48KHz.
+ * Some incorrect BIOS settings can cause this.
+ * For production, we always get the value from the BIOS. */
+ reference_frequency =
+ dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
+
+ base = &i2caux_dce110->base;
+
+ dal_i2caux_construct(base, ctx);
+
+ i2caux_dce110->base.funcs = &i2caux_funcs;
+ i2caux_dce110->i2c_hw_buffer_in_use = false;
+ /* Create I2C engines (one per DDC line / connector)
+ * for the different I2C/AUX usage cases: DDC, generic GPIO, AUX.
+ */
+ do {
+ enum gpio_ddc_line line_id = hw_ddc_lines[i];
+
+ struct i2c_hw_engine_dce110_create_arg hw_arg_dce110;
+
+ if (use_i2c_sw_engine) {
+ struct i2c_sw_engine_dce110_create_arg sw_arg;
+
+ sw_arg.engine_id = i;
+ sw_arg.default_speed = base->default_i2c_sw_speed;
+ sw_arg.ctx = ctx;
+ base->i2c_sw_engines[line_id] =
+ dal_i2c_sw_engine_dce110_create(&sw_arg);
+ }
+
+ hw_arg_dce110.engine_id = i;
+ hw_arg_dce110.reference_frequency = reference_frequency;
+ hw_arg_dce110.default_speed = base->default_i2c_hw_speed;
+ hw_arg_dce110.ctx = ctx;
+ hw_arg_dce110.regs = &i2c_hw_engine_regs[i];
+ hw_arg_dce110.i2c_shift = i2c_shift;
+ hw_arg_dce110.i2c_mask = i2c_mask;
+
+ base->i2c_hw_engines[line_id] =
+ dal_i2c_hw_engine_dce110_create(&hw_arg_dce110);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_ddc_lines));
+
+ /* Create AUX engines for all lines which have an associated HW AUX;
+ * 'i' (the loop counter) is used as the DDC/AUX engine_id */
+
+ i = 0;
+
+ do {
+ enum gpio_ddc_line line_id = hw_aux_lines[i];
+
+ struct aux_engine_dce110_init_data aux_init_data;
+
+ aux_init_data.engine_id = i;
+ aux_init_data.timeout_period = base->aux_timeout_period;
+ aux_init_data.ctx = ctx;
+ aux_init_data.regs = &aux_regs[i];
+
+ base->aux_engines[line_id] =
+ dal_aux_engine_dce110_create(&aux_init_data);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_aux_lines));
+
+ /*TODO Generic I2C SW and HW*/
+}
+
+/*
+ * dal_i2caux_dce110_create
+ *
+ * @brief
+ * public interface to allocate memory for DCE11 I2CAUX
+ *
+ * @param
+ * struct dc_context *ctx - [in]
+ *
+ * @return
+ * pointer to the base struct of DCE11 I2CAUX
+ */
+struct i2caux *dal_i2caux_dce110_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce110_aux_regs,
+ i2c_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
new file mode 100644
index 000000000000..1b1f71c60ac9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2caux_dce110.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE110_H__
+#define __DAL_I2C_AUX_DCE110_H__
+
+#include "../i2caux.h"
+
+struct i2caux_dce110 {
+ struct i2caux base;
+ /* indicate the I2C HW circular buffer is in use */
+ bool i2c_hw_buffer_in_use;
+};
+
+struct dce110_aux_registers;
+struct dce110_i2c_hw_engine_registers;
+struct dce110_i2c_hw_engine_shift;
+struct dce110_i2c_hw_engine_mask;
+
+struct i2caux *dal_i2caux_dce110_create(
+ struct dc_context *ctx);
+
+void dal_i2caux_dce110_construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx,
+ const struct dce110_aux_registers *aux_regs,
+ const struct dce110_i2c_hw_engine_registers *i2c_hw_engine_regs,
+ const struct dce110_i2c_hw_engine_shift *i2c_shift,
+ const struct dce110_i2c_hw_engine_mask *i2c_mask);
+
+#endif /* __DAL_I2C_AUX_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
new file mode 100644
index 000000000000..dafc1a727f7f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/i2caux_dce110.h"
+#include "i2caux_dce112.h"
+
+#include "../dce110/aux_engine_dce110.h"
+
+#include "../dce110/i2c_hw_engine_dce110.h"
+
+#include "dce/dce_11_2_d.h"
+#include "dce/dce_11_2_sh_mask.h"
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = AUX_CONTROL__AUX_RESET_MASK \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dce112_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+static const struct dce110_i2c_hw_engine_registers dce112_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+static void construct(
+ struct i2caux_dce110 *i2caux_dce110,
+ struct dc_context *ctx)
+{
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce112_aux_regs,
+ dce112_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+}
+
+/*
+ * dal_i2caux_dce112_create
+ *
+ * @brief
+ * public interface to allocate memory for DCE11.2 I2CAUX
+ *
+ * @param
+ * struct dc_context *ctx - [in]
+ *
+ * @return
+ * pointer to the base struct of DCE11.2 I2CAUX
+ */
+struct i2caux *dal_i2caux_dce112_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(i2caux_dce110, ctx);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
new file mode 100644
index 000000000000..8d35453c25b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce112/i2caux_dce112.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE112_H__
+#define __DAL_I2C_AUX_DCE112_H__
+
+struct i2caux *dal_i2caux_dce112_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCE112_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
new file mode 100644
index 000000000000..668981a4c285
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/i2c_hw_engine_dce110.h"
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+/* macros to expand register list macros defined in HW object header file
+ * end *********************/
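+/*
+ * Illustrative expansion (sketch using names from this file):
+ * SRI(AUX_CONTROL, DP_AUX, 0)
+ * becomes
+ * .AUX_CONTROL = BASE(mmDP_AUX0_AUX_CONTROL_BASE_IDX) + mmDP_AUX0_AUX_CONTROL
+ * i.e. the segment base address plus the per-instance register offset.
+ */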
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \
+}
+
+static const struct dce110_aux_registers dce120_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_i2c_hw_engine_registers dce120_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+struct i2caux *dal_i2caux_dce120_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dce120_aux_regs,
+ dce120_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h
new file mode 100644
index 000000000000..b6ac47617c70
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce120/i2caux_dce120.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE120_H__
+#define __DAL_I2C_AUX_DCE120_H__
+
+struct i2caux *dal_i2caux_dce120_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCE120_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
new file mode 100644
index 000000000000..fd0832dd2c75
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_hw_engine.h"
+#include "../i2c_generic_hw_engine.h"
+/*
+ * Header of this unit
+ */
+
+#include "i2c_hw_engine_dce80.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+/*
+ * This unit
+ */
+
+enum dc_i2c_status {
+ DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+};
+
+enum dc_i2c_arbitration {
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_HIGH
+};
+
+enum {
+ /* No timeout in HW
+ * (timeout implemented in SW by querying status) */
+ I2C_SETUP_TIME_LIMIT = 255,
+ I2C_HW_BUFFER_SIZE = 144
+};
+
+/*
+ * @brief
+ * Cast 'struct i2c_hw_engine *'
+ * to 'struct i2c_hw_engine_dce80 *'
+ */
+#define FROM_I2C_HW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_hw_engine_dce80, base)
+
+/*
+ * @brief
+ * Cast pointer to 'struct i2c_engine *'
+ * to pointer to 'struct i2c_hw_engine_dce80 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
+
+/*
+ * @brief
+ * Cast pointer to 'struct engine *'
+ * to pointer to 'struct i2c_hw_engine_dce80 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+static void disable_i2c_hw_engine(
+ struct i2c_hw_engine_dce80 *engine)
+{
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
+ uint32_t value = 0;
+
+ struct dc_context *ctx = NULL;
+
+ ctx = engine->base.base.base.ctx;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_ENABLE);
+
+ dm_write_reg(ctx, addr, value);
+}
+
+static void release_engine(
+ struct engine *engine)
+{
+ struct i2c_hw_engine_dce80 *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_engine *base = NULL;
+ bool safe_to_reset;
+ uint32_t value = 0;
+
+ base = &hw_engine->base.base;
+
+ /* Restore original HW engine speed */
+
+ base->funcs->set_speed(base, hw_engine->base.original_speed);
+
+ /* Release I2C */
+ {
+ value = dm_read_reg(engine->ctx, mmDC_I2C_ARBITRATION);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_ARBITRATION,
+ DC_I2C_SW_DONE_USING_I2C_REG);
+
+ dm_write_reg(engine->ctx, mmDC_I2C_ARBITRATION, value);
+ }
+
+ /* Reset HW engine */
+ {
+ uint32_t i2c_sw_status = 0;
+
+ value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+ /* if used by SW, safe to reset */
+ safe_to_reset = (i2c_sw_status == 1);
+ }
+ {
+ value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
+
+ if (safe_to_reset)
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
+ }
+
+ /* HW I2c engine - clock gating feature */
+ if (!hw_engine->engine_keep_power_up_count)
+ disable_i2c_hw_engine(hw_engine);
+}
+
+static void destruct(
+ struct i2c_hw_engine_dce80 *engine)
+{
+ dal_i2c_hw_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **i2c_engine)
+{
+ struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(*i2c_engine);
+
+ destruct(engine);
+
+ kfree(engine);
+
+ *i2c_engine = NULL;
+}
+
+static bool setup_engine(
+ struct i2c_engine *i2c_engine)
+{
+ uint32_t value = 0;
+ struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
+
+ /* Program pin select */
+ {
+ const uint32_t addr = mmDC_I2C_CONTROL;
+
+ value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_GO);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SEND_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_TRANSACTION_COUNT);
+
+ set_reg_field_value(
+ value,
+ engine->engine_id,
+ DC_I2C_CONTROL,
+ DC_I2C_DDC_SELECT);
+
+ dm_write_reg(i2c_engine->base.ctx, addr, value);
+ }
+
+ /* Program time limit */
+ {
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
+
+ value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ set_reg_field_value(
+ value,
+ I2C_SETUP_TIME_LIMIT,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_TIME_LIMIT);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_ENABLE);
+
+ dm_write_reg(i2c_engine->base.ctx, addr, value);
+ }
+
+ /* Program SW/HW arbitration:
+ * keep SW I2C priority at Normal and
+ * allow SW GO requests to stay queued while I2C is in use by HW */
+ {
+ value = dm_read_reg(i2c_engine->base.ctx,
+ mmDC_I2C_ARBITRATION);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_ARBITRATION,
+ DC_I2C_NO_QUEUED_SW_GO);
+
+ set_reg_field_value(
+ value,
+ DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_NORMAL,
+ DC_I2C_ARBITRATION,
+ DC_I2C_SW_PRIORITY);
+
+ dm_write_reg(i2c_engine->base.ctx,
+ mmDC_I2C_ARBITRATION, value);
+ }
+
+ return true;
+}
+
+static uint32_t get_speed(
+ const struct i2c_engine *i2c_engine)
+{
+ const struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
+
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
+
+ uint32_t pre_scale = 0;
+
+ uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ pre_scale = get_reg_field_value(
+ value,
+ DC_I2C_DDC1_SPEED,
+ DC_I2C_DDC1_PRESCALE);
+
+ /* [anaumov] it seems the following is unnecessary */
+ /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/
+
+ return pre_scale ?
+ engine->reference_frequency / pre_scale :
+ engine->base.default_speed;
+}
+
+static void set_speed(
+ struct i2c_engine *i2c_engine,
+ uint32_t speed)
+{
+ struct i2c_hw_engine_dce80 *engine = FROM_I2C_ENGINE(i2c_engine);
+
+ if (speed) {
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SPEED;
+
+ uint32_t value = dm_read_reg(i2c_engine->base.ctx, addr);
+
+ set_reg_field_value(
+ value,
+ engine->reference_frequency / speed,
+ DC_I2C_DDC1_SPEED,
+ DC_I2C_DDC1_PRESCALE);
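+
+ /* For example (illustrative values): with a reference_frequency of
+ * 13500 kHz (27 MHz XTALIN halved, as set up in i2caux_dce80.c) and a
+ * requested speed of 50 kHz, the prescale programmed above would be
+ * 13500 / 50 = 270. */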
+
+ set_reg_field_value(
+ value,
+ 2,
+ DC_I2C_DDC1_SPEED,
+ DC_I2C_DDC1_THRESHOLD);
+
+ dm_write_reg(i2c_engine->base.ctx, addr, value);
+ }
+}
+
+static inline void reset_hw_engine(struct engine *engine)
+{
+ uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_CONTROL);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ dm_write_reg(engine->ctx, mmDC_I2C_CONTROL, value);
+}
+
+static bool is_hw_busy(struct engine *engine)
+{
+ uint32_t i2c_sw_status = 0;
+
+ uint32_t value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_IDLE)
+ return false;
+
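+ /* Not idle: attempt a soft reset of the HW engine and re-read the
+ * status to see whether it has become idle. */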
+ reset_hw_engine(engine);
+
+ value = dm_read_reg(engine->ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+
+ return i2c_sw_status != DC_I2C_STATUS__DC_I2C_STATUS_IDLE;
+}
+
+/*
+ * @brief
+ * DC_I2C_TRANSACTION0-3 MM register addresses
+ */
+static const uint32_t transaction_addr[] = {
+ mmDC_I2C_TRANSACTION0,
+ mmDC_I2C_TRANSACTION1,
+ mmDC_I2C_TRANSACTION2,
+ mmDC_I2C_TRANSACTION3
+};
+
+static bool process_transaction(
+ struct i2c_hw_engine_dce80 *engine,
+ struct i2c_request_transaction_data *request)
+{
+ uint32_t length = request->length;
+ uint8_t *buffer = request->data;
+
+ bool last_transaction = false;
+ uint32_t value = 0;
+
+ struct dc_context *ctx = NULL;
+
+ ctx = engine->base.base.base.ctx;
+
+ {
+ const uint32_t addr =
+ transaction_addr[engine->transaction_count];
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_STOP_ON_NACK0);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_START0);
+
+ if ((engine->transaction_count == 3) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_STOP0);
+
+ last_transaction = true;
+ } else
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_STOP0);
+
+ set_reg_field_value(
+ value,
+ (0 != (request->action &
+ I2CAUX_TRANSACTION_ACTION_I2C_READ)),
+ DC_I2C_TRANSACTION0,
+ DC_I2C_RW0);
+
+ set_reg_field_value(
+ value,
+ length,
+ DC_I2C_TRANSACTION0,
+ DC_I2C_COUNT0);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ /* Write the I2C address and I2C data
+ * into the hardware circular buffer, one byte per entry.
+ * As an example, the 7-bit I2C slave address for a CRT monitor
+ * for reading DDC/EDID information is 0b1010001.
+ * For an I2C send operation, the LSB must be programmed to 0;
+ * for an I2C receive operation, the LSB must be programmed to 1. */
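+ /* For example, applying the rule above to the 7-bit address
+ * 0b1010001: the address byte ends in 0 for a send (0b10100010)
+ * and in 1 for a receive (0b10100011). (Illustrative only.) */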
+
+ {
+ value = 0;
+
+ set_reg_field_value(
+ value,
+ false,
+ DC_I2C_DATA,
+ DC_I2C_DATA_RW);
+
+ set_reg_field_value(
+ value,
+ request->address,
+ DC_I2C_DATA,
+ DC_I2C_DATA);
+
+ if (engine->transaction_count == 0) {
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DATA,
+ DC_I2C_INDEX);
+
+ /*enable index write*/
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DATA,
+ DC_I2C_INDEX_WRITE);
+ }
+
+ dm_write_reg(ctx, mmDC_I2C_DATA, value);
+
+ if (!(request->action & I2CAUX_TRANSACTION_ACTION_I2C_READ)) {
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DATA,
+ DC_I2C_INDEX_WRITE);
+
+ while (length) {
+
+ set_reg_field_value(
+ value,
+ *buffer++,
+ DC_I2C_DATA,
+ DC_I2C_DATA);
+
+ dm_write_reg(ctx, mmDC_I2C_DATA, value);
+ --length;
+ }
+ }
+ }
+
+ ++engine->transaction_count;
+ engine->buffer_used_bytes += length + 1;
+
+ return last_transaction;
+}
+
+static void execute_transaction(
+ struct i2c_hw_engine_dce80 *engine)
+{
+ uint32_t value = 0;
+ struct dc_context *ctx = NULL;
+
+ ctx = engine->base.base.base.ctx;
+
+ {
+ const uint32_t addr = engine->addr.DC_I2C_DDCX_SETUP;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_DATA_DRIVE_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_CLK_DRIVE_EN);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_DATA_DRIVE_SEL);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_INTRA_TRANSACTION_DELAY);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_DDC1_SETUP,
+ DC_I2C_DDC1_INTRA_BYTE_DELAY);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ {
+ const uint32_t addr = mmDC_I2C_CONTROL;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SOFT_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SW_STATUS_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_SEND_RESET);
+
+ set_reg_field_value(
+ value,
+ 0,
+ DC_I2C_CONTROL,
+ DC_I2C_GO);
+
+ set_reg_field_value(
+ value,
+ engine->transaction_count - 1,
+ DC_I2C_CONTROL,
+ DC_I2C_TRANSACTION_COUNT);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ /* start I2C transfer */
+ {
+ const uint32_t addr = mmDC_I2C_CONTROL;
+
+ value = dm_read_reg(ctx, addr);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_CONTROL,
+ DC_I2C_GO);
+
+ dm_write_reg(ctx, addr, value);
+ }
+
+ /* All transactions have been submitted and the HW buffer is considered
+ * empty (though that actually happens only when the status becomes DONE) */
+ engine->transaction_count = 0;
+ engine->buffer_used_bytes = 0;
+}
+
+static void submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request)
+{
+ request->status = I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ if (!process_transaction(FROM_I2C_ENGINE(engine), request))
+ return;
+
+ if (is_hw_busy(&engine->base)) {
+ request->status = I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ return;
+ }
+
+ execute_transaction(FROM_I2C_ENGINE(engine));
+}
+
+static void process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply)
+{
+ uint32_t length = reply->length;
+ uint8_t *buffer = reply->data;
+
+ uint32_t value = 0;
+
+ /*set index*/
+ set_reg_field_value(
+ value,
+ length - 1,
+ DC_I2C_DATA,
+ DC_I2C_INDEX);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DATA,
+ DC_I2C_DATA_RW);
+
+ set_reg_field_value(
+ value,
+ 1,
+ DC_I2C_DATA,
+ DC_I2C_INDEX_WRITE);
+
+ dm_write_reg(engine->base.ctx, mmDC_I2C_DATA, value);
+
+ while (length) {
+ /* After reading the status,
+ * if the I2C operation executed successfully
+ * (i.e. DC_I2C_STATUS_DONE = 1), the I2C controller
+ * should read the data bytes from the I2C circular data buffer */
+
+ value = dm_read_reg(engine->base.ctx, mmDC_I2C_DATA);
+
+ *buffer++ = get_reg_field_value(
+ value,
+ DC_I2C_DATA,
+ DC_I2C_DATA);
+
+ --length;
+ }
+}
+
+static enum i2c_channel_operation_result get_channel_status(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes)
+{
+ uint32_t i2c_sw_status = 0;
+ uint32_t value = dm_read_reg(engine->base.ctx, mmDC_I2C_SW_STATUS);
+
+ i2c_sw_status = get_reg_field_value(
+ value,
+ DC_I2C_SW_STATUS,
+ DC_I2C_SW_STATUS);
+
+ if (i2c_sw_status == DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW)
+ return I2C_CHANNEL_OPERATION_ENGINE_BUSY;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK)
+ return I2C_CHANNEL_OPERATION_NO_RESPONSE;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK)
+ return I2C_CHANNEL_OPERATION_TIMEOUT;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK)
+ return I2C_CHANNEL_OPERATION_FAILED;
+ else if (value & DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK)
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ /*
+ * This is the case when the HW is used for communication;
+ * I2C_SW_STATUS can be zero
+ */
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+}
+
+static uint32_t get_hw_buffer_available_size(
+ const struct i2c_hw_engine *engine)
+{
+ return I2C_HW_BUFFER_SIZE -
+ FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes;
+}
+
+static uint32_t get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length)
+{
+ uint32_t speed = engine->base.funcs->get_speed(&engine->base);
+
+ uint32_t period_timeout;
+ uint32_t num_of_clock_stretches;
+
+ if (!speed)
+ return 0;
+
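+ /* Rough estimate: allow TRANSACTION_TIMEOUT_IN_I2C_CLOCKS clock
+ * periods for every potential clock stretch, where the stretch count
+ * covers the bytes of this request (8 clocks each) plus the bytes and
+ * transactions already queued in the HW buffer. */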
+ period_timeout = (1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed;
+
+ num_of_clock_stretches = 1 + (length << 3) + 1;
+ num_of_clock_stretches +=
+ (FROM_I2C_HW_ENGINE(engine)->buffer_used_bytes << 3) +
+ (FROM_I2C_HW_ENGINE(engine)->transaction_count << 1);
+
+ return period_timeout * num_of_clock_stretches;
+}
+
+/*
+ * @brief
+ * DC_I2C_DDC1_SETUP MM register offsets
+ *
+ * @note
+ * The indices of this offset array are DDC engine IDs
+ */
+static const int32_t ddc_setup_offset[] = {
+
+ mmDC_I2C_DDC1_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 1 */
+ mmDC_I2C_DDC2_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 2 */
+ mmDC_I2C_DDC3_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 3 */
+ mmDC_I2C_DDC4_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 4 */
+ mmDC_I2C_DDC5_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 5 */
+ mmDC_I2C_DDC6_SETUP - mmDC_I2C_DDC1_SETUP, /* DDC Engine 6 */
+ mmDC_I2C_DDCVGA_SETUP - mmDC_I2C_DDC1_SETUP /* DDC Engine 7 */
+};
+
+/*
+ * @brief
+ * DC_I2C_DDC1_SPEED MM register offsets
+ *
+ * @note
+ * The indices of this offset array are DDC engine IDs
+ */
+static const int32_t ddc_speed_offset[] = {
+ mmDC_I2C_DDC1_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 1 */
+ mmDC_I2C_DDC2_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 2 */
+ mmDC_I2C_DDC3_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 3 */
+ mmDC_I2C_DDC4_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 4 */
+ mmDC_I2C_DDC5_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 5 */
+ mmDC_I2C_DDC6_SPEED - mmDC_I2C_DDC1_SPEED, /* DDC Engine 6 */
+ mmDC_I2C_DDCVGA_SPEED - mmDC_I2C_DDC1_SPEED /* DDC Engine 7 */
+};
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .destroy = destroy,
+ .get_speed = get_speed,
+ .set_speed = set_speed,
+ .setup_engine = setup_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .get_channel_status = get_channel_status,
+ .acquire_engine = dal_i2c_hw_engine_acquire_engine,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_hw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_hw_engine_submit_request,
+};
+
+static const struct i2c_hw_engine_funcs i2c_hw_engine_funcs = {
+ .get_hw_buffer_available_size =
+ get_hw_buffer_available_size,
+ .get_transaction_timeout =
+ get_transaction_timeout,
+ .wait_on_operation_result =
+ dal_i2c_hw_engine_wait_on_operation_result,
+};
+
+static void construct(
+ struct i2c_hw_engine_dce80 *engine,
+ const struct i2c_hw_engine_dce80_create_arg *arg)
+{
+ dal_i2c_hw_engine_construct(&engine->base, arg->ctx);
+
+ engine->base.base.base.funcs = &engine_funcs;
+ engine->base.base.funcs = &i2c_engine_funcs;
+ engine->base.funcs = &i2c_hw_engine_funcs;
+ engine->base.default_speed = arg->default_speed;
+ engine->addr.DC_I2C_DDCX_SETUP =
+ mmDC_I2C_DDC1_SETUP + ddc_setup_offset[arg->engine_id];
+ engine->addr.DC_I2C_DDCX_SPEED =
+ mmDC_I2C_DDC1_SPEED + ddc_speed_offset[arg->engine_id];
+
+ engine->engine_id = arg->engine_id;
+ engine->reference_frequency = arg->reference_frequency;
+ engine->buffer_used_bytes = 0;
+ engine->transaction_count = 0;
+ engine->engine_keep_power_up_count = 1;
+}
+
+struct i2c_engine *dal_i2c_hw_engine_dce80_create(
+ const struct i2c_hw_engine_dce80_create_arg *arg)
+{
+ struct i2c_hw_engine_dce80 *engine;
+
+ if (!arg) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if ((arg->engine_id >= sizeof(ddc_setup_offset) / sizeof(int32_t)) ||
+ (arg->engine_id >= sizeof(ddc_speed_offset) / sizeof(int32_t)) ||
+ !arg->reference_frequency) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(struct i2c_hw_engine_dce80), GFP_KERNEL);
+
+ if (!engine) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ construct(engine, arg);
+ return &engine->base.base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
new file mode 100644
index 000000000000..5c6116fb5479
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_hw_engine_dce80.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_HW_ENGINE_DCE80_H__
+#define __DAL_I2C_HW_ENGINE_DCE80_H__
+
+struct i2c_hw_engine_dce80 {
+ struct i2c_hw_engine base;
+ struct {
+ uint32_t DC_I2C_DDCX_SETUP;
+ uint32_t DC_I2C_DDCX_SPEED;
+ } addr;
+ uint32_t engine_id;
+ /* expressed in kilohertz */
+ uint32_t reference_frequency;
+ /* number of bytes currently used in HW buffer */
+ uint32_t buffer_used_bytes;
+ /* number of pending transactions (before GO) */
+ uint32_t transaction_count;
+ uint32_t engine_keep_power_up_count;
+};
+
+struct i2c_hw_engine_dce80_create_arg {
+ uint32_t engine_id;
+ uint32_t reference_frequency;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+struct i2c_engine *dal_i2c_hw_engine_dce80_create(
+ const struct i2c_hw_engine_dce80_create_arg *arg);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
new file mode 100644
index 000000000000..4853ee26096a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_sw_engine_dce80.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+/*
+ * This unit
+ */
+
+static const uint32_t ddc_hw_status_addr[] = {
+ mmDC_I2C_DDC1_HW_STATUS,
+ mmDC_I2C_DDC2_HW_STATUS,
+ mmDC_I2C_DDC3_HW_STATUS,
+ mmDC_I2C_DDC4_HW_STATUS,
+ mmDC_I2C_DDC5_HW_STATUS,
+ mmDC_I2C_DDC6_HW_STATUS,
+ mmDC_I2C_DDCVGA_HW_STATUS
+};
+
+/*
+ * @brief
+ * Cast 'struct i2c_sw_engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_I2C_SW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_sw_engine_dce80, base)
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_SW_ENGINE(container_of((ptr), struct i2c_sw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_sw_engine_dce80 *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+static void release_engine(
+ struct engine *engine)
+{
+
+}
+
+static void destruct(
+ struct i2c_sw_engine_dce80 *engine)
+{
+ dal_i2c_sw_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **engine)
+{
+ struct i2c_sw_engine_dce80 *sw_engine = FROM_I2C_ENGINE(*engine);
+
+ destruct(sw_engine);
+
+ kfree(sw_engine);
+
+ *engine = NULL;
+}
+
+static bool acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc_handle)
+{
+ return dal_i2caux_i2c_sw_engine_acquire_engine(engine, ddc_handle);
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .destroy = destroy,
+ .get_speed = dal_i2c_sw_engine_get_speed,
+ .set_speed = dal_i2c_sw_engine_set_speed,
+ .setup_engine = dal_i2c_engine_setup_i2c_engine,
+ .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
+ .process_channel_reply = dal_i2c_engine_process_channel_reply,
+ .get_channel_status = dal_i2c_sw_engine_get_channel_status,
+};
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_sw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_sw_engine_submit_request,
+};
+
+static void construct(
+ struct i2c_sw_engine_dce80 *engine,
+ const struct i2c_sw_engine_dce80_create_arg *arg)
+{
+ struct i2c_sw_engine_create_arg arg_base;
+
+ arg_base.ctx = arg->ctx;
+ arg_base.default_speed = arg->default_speed;
+
+ dal_i2c_sw_engine_construct(&engine->base, &arg_base);
+
+ engine->base.base.base.funcs = &engine_funcs;
+ engine->base.base.funcs = &i2c_engine_funcs;
+ engine->base.default_speed = arg->default_speed;
+ engine->engine_id = arg->engine_id;
+}
+
+struct i2c_engine *dal_i2c_sw_engine_dce80_create(
+ const struct i2c_sw_engine_dce80_create_arg *arg)
+{
+ struct i2c_sw_engine_dce80 *engine;
+
+ if (!arg) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(struct i2c_sw_engine_dce80), GFP_KERNEL);
+
+ if (!engine) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ construct(engine, arg);
+ return &engine->base.base;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h
new file mode 100644
index 000000000000..26355c088746
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2c_sw_engine_dce80.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_SW_ENGINE_DCE80_H__
+#define __DAL_I2C_SW_ENGINE_DCE80_H__
+
+struct i2c_sw_engine_dce80 {
+ struct i2c_sw_engine base;
+ uint32_t engine_id;
+};
+
+struct i2c_sw_engine_dce80_create_arg {
+ uint32_t engine_id;
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+struct i2c_engine *dal_i2c_sw_engine_dce80_create(
+ const struct i2c_sw_engine_dce80_create_arg *arg);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
new file mode 100644
index 000000000000..ed48596dd2a5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2caux_dce80.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "i2c_sw_engine_dce80.h"
+#include "../i2c_hw_engine.h"
+#include "i2c_hw_engine_dce80.h"
+#include "../i2c_generic_hw_engine.h"
+#include "../aux_engine.h"
+
+
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+
+/* set register offset */
+#define SR(reg_name)\
+ .reg_name = mm ## reg_name
+
+/* set register offset with instance */
+#define SRI(reg_name, block, id)\
+ .reg_name = mm ## block ## id ## _ ## reg_name
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers dce80_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5)
+};
+
+/*
+ * This unit
+ */
+
+#define FROM_I2C_AUX(ptr) \
+ container_of((ptr), struct i2caux_dce80, base)
+
+static void destruct(
+ struct i2caux_dce80 *i2caux_dce80)
+{
+ dal_i2caux_destruct(&i2caux_dce80->base);
+}
+
+static void destroy(
+ struct i2caux **i2c_engine)
+{
+ struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(*i2c_engine);
+
+ destruct(i2caux_dce80);
+
+ kfree(i2caux_dce80);
+
+ *i2c_engine = NULL;
+}
+
+static struct i2c_engine *acquire_i2c_hw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ struct i2caux_dce80 *i2caux_dce80 = FROM_I2C_AUX(i2caux);
+
+ struct i2c_engine *engine = NULL;
+ bool non_generic;
+
+ if (!ddc)
+ return NULL;
+
+ if (ddc->hw_info.hw_supported) {
+ enum gpio_ddc_line line = dal_ddc_get_line(ddc);
+
+ if (line < GPIO_DDC_LINE_COUNT) {
+ non_generic = true;
+ engine = i2caux->i2c_hw_engines[line];
+ }
+ }
+
+ if (!engine) {
+ non_generic = false;
+ engine = i2caux->i2c_generic_hw_engine;
+ }
+
+ if (!engine)
+ return NULL;
+
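+ /* Line-specific HW engines share the single HW circular buffer, so
+ * only one of them may be acquired at a time; the generic HW engine
+ * is not gated by that flag. */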
+ if (non_generic) {
+ if (!i2caux_dce80->i2c_hw_buffer_in_use &&
+ engine->base.funcs->acquire(&engine->base, ddc)) {
+ i2caux_dce80->i2c_hw_buffer_in_use = true;
+ return engine;
+ }
+ } else {
+ if (engine->base.funcs->acquire(&engine->base, ddc))
+ return engine;
+ }
+
+ return NULL;
+}
+
+static void release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine)
+{
+ if (engine->funcs->get_engine_type(engine) ==
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW)
+ FROM_I2C_AUX(i2caux)->i2c_hw_buffer_in_use = false;
+
+ dal_i2caux_release_engine(i2caux, engine);
+}
+
+static const enum gpio_ddc_line hw_ddc_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+ GPIO_DDC_LINE_DDC_VGA
+};
+
+static const enum gpio_ddc_line hw_aux_lines[] = {
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6
+};
+
+static const struct i2caux_funcs i2caux_funcs = {
+ .destroy = destroy,
+ .acquire_i2c_hw_engine = acquire_i2c_hw_engine,
+ .release_engine = release_engine,
+ .acquire_i2c_sw_engine = dal_i2caux_acquire_i2c_sw_engine,
+ .acquire_aux_engine = dal_i2caux_acquire_aux_engine,
+};
+
+static void construct(
+ struct i2caux_dce80 *i2caux_dce80,
+ struct dc_context *ctx)
+{
+ /* The entire family has its I2C engine reference clock frequency
+ * changed from XTALIN (27 MHz) to XTALIN/2 (13.5 MHz) */
+
+ struct i2caux *base = &i2caux_dce80->base;
+
+ uint32_t reference_frequency =
+ dal_i2caux_get_reference_clock(ctx->dc_bios) >> 1;
+
+ /*bool use_i2c_sw_engine = dal_adapter_service_is_feature_supported(as,
+ FEATURE_RESTORE_USAGE_I2C_SW_ENGINE);*/
+
+ /* Use SW I2C for DCE8 currently, since we have a bug with HW I2C */
+ bool use_i2c_sw_engine = true;
+
+ uint32_t i;
+
+ dal_i2caux_construct(base, ctx);
+
+ i2caux_dce80->base.funcs = &i2caux_funcs;
+ i2caux_dce80->i2c_hw_buffer_in_use = false;
+
+ /* Create I2C HW engines (HW + SW pairs)
+ * for all lines which have assisted HW DDC;
+ * 'i' (the loop counter) is used as the DDC/AUX engine_id */
+
+ i = 0;
+
+ do {
+ enum gpio_ddc_line line_id = hw_ddc_lines[i];
+
+ struct i2c_hw_engine_dce80_create_arg hw_arg;
+
+ if (use_i2c_sw_engine) {
+ struct i2c_sw_engine_dce80_create_arg sw_arg;
+
+ sw_arg.engine_id = i;
+ sw_arg.default_speed = base->default_i2c_sw_speed;
+ sw_arg.ctx = ctx;
+ base->i2c_sw_engines[line_id] =
+ dal_i2c_sw_engine_dce80_create(&sw_arg);
+ }
+
+ hw_arg.engine_id = i;
+ hw_arg.reference_frequency = reference_frequency;
+ hw_arg.default_speed = base->default_i2c_hw_speed;
+ hw_arg.ctx = ctx;
+
+ base->i2c_hw_engines[line_id] =
+ dal_i2c_hw_engine_dce80_create(&hw_arg);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_ddc_lines));
+
+ /* Create AUX engines for all lines which have assisted HW AUX;
+ * 'i' (the loop counter) is used as the DDC/AUX engine_id */
+
+ i = 0;
+
+ do {
+ enum gpio_ddc_line line_id = hw_aux_lines[i];
+
+ struct aux_engine_dce110_init_data arg;
+
+ arg.engine_id = i;
+ arg.timeout_period = base->aux_timeout_period;
+ arg.ctx = ctx;
+ arg.regs = &dce80_aux_regs[i];
+
+ base->aux_engines[line_id] =
+ dal_aux_engine_dce110_create(&arg);
+
+ ++i;
+ } while (i < ARRAY_SIZE(hw_aux_lines));
+
+ /* TODO Generic I2C SW and HW */
+}
+
+struct i2caux *dal_i2caux_dce80_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce80 *i2caux_dce80 =
+ kzalloc(sizeof(struct i2caux_dce80), GFP_KERNEL);
+
+ if (!i2caux_dce80) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ construct(i2caux_dce80, ctx);
+ return &i2caux_dce80->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h
new file mode 100644
index 000000000000..21908629e973
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce80/i2caux_dce80.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCE80_H__
+#define __DAL_I2C_AUX_DCE80_H__
+
+struct i2caux_dce80 {
+ struct i2caux base;
+ /* indicate the I2C HW circular buffer is in use */
+ bool i2c_hw_buffer_in_use;
+};
+
+struct i2caux *dal_i2caux_dce80_create(
+ struct dc_context *ctx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
new file mode 100644
index 000000000000..13b807d8aff8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+#include "../dce110/aux_engine_dce110.h"
+#include "../dce110/i2c_hw_engine_dce110.h"
+#include "../dce110/i2caux_dce110.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+/* begin *********************
+ * macros to expand register list macros defined in HW object header file */
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+/* compile time expand base address. */
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+/* macros to expand register list macros defined in HW object header file
+ * end *********************/
+
+#define aux_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = DP_AUX0_AUX_CONTROL__AUX_RESET_MASK \
+}
+
+#define hw_engine_regs(id)\
+{\
+ I2C_HW_ENGINE_COMMON_REG_LIST(id) \
+}
+
+static const struct dce110_aux_registers dcn10_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+ aux_regs(2),
+ aux_regs(3),
+ aux_regs(4),
+ aux_regs(5),
+};
+
+static const struct dce110_i2c_hw_engine_registers dcn10_hw_engine_regs[] = {
+ hw_engine_regs(1),
+ hw_engine_regs(2),
+ hw_engine_regs(3),
+ hw_engine_regs(4),
+ hw_engine_regs(5),
+ hw_engine_regs(6)
+};
+
+static const struct dce110_i2c_hw_engine_shift i2c_shift = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(__SHIFT)
+};
+
+static const struct dce110_i2c_hw_engine_mask i2c_mask = {
+ I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)
+};
+
+struct i2caux *dal_i2caux_dcn10_create(
+ struct dc_context *ctx)
+{
+ struct i2caux_dce110 *i2caux_dce110 =
+ kzalloc(sizeof(struct i2caux_dce110), GFP_KERNEL);
+
+ if (!i2caux_dce110) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ dal_i2caux_dce110_construct(i2caux_dce110,
+ ctx,
+ dcn10_aux_regs,
+ dcn10_hw_engine_regs,
+ &i2c_shift,
+ &i2c_mask);
+ return &i2caux_dce110->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h
new file mode 100644
index 000000000000..aeb4a86463d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/dcn10/i2caux_dcn10.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DCN10_H__
+#define __DAL_I2C_AUX_DCN10_H__
+
+struct i2caux *dal_i2caux_dcn10_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
new file mode 100644
index 000000000000..e6408f644086
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "../i2caux.h"
+#include "../engine.h"
+#include "../i2c_engine.h"
+#include "../i2c_sw_engine.h"
+#include "../i2c_hw_engine.h"
+
+/*
+ * Header of this unit
+ */
+#include "i2caux_diag.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+static void destruct(
+ struct i2caux *i2caux)
+{
+ dal_i2caux_destruct(i2caux);
+}
+
+static void destroy(
+ struct i2caux **i2c_engine)
+{
+ destruct(*i2c_engine);
+
+ kfree(*i2c_engine);
+
+ *i2c_engine = NULL;
+}
+
+/* function table */
+static const struct i2caux_funcs i2caux_funcs = {
+ .destroy = destroy,
+ .acquire_i2c_hw_engine = NULL,
+ .release_engine = NULL,
+ .acquire_i2c_sw_engine = NULL,
+ .acquire_aux_engine = NULL,
+};
+
+static void construct(
+ struct i2caux *i2caux,
+ struct dc_context *ctx)
+{
+ dal_i2caux_construct(i2caux, ctx);
+ i2caux->funcs = &i2caux_funcs;
+}
+
+struct i2caux *dal_i2caux_diag_fpga_create(
+ struct dc_context *ctx)
+{
+ struct i2caux *i2caux = kzalloc(sizeof(struct i2caux),
+ GFP_KERNEL);
+
+ if (!i2caux) {
+ ASSERT_CRITICAL(false);
+ return NULL;
+ }
+
+ construct(i2caux, ctx);
+ return i2caux;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
new file mode 100644
index 000000000000..a83eeb748283
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/diagnostics/i2caux_diag.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_DIAG_FPGA_H__
+#define __DAL_I2C_AUX_DIAG_FPGA_H__
+
+struct i2caux *dal_i2caux_diag_fpga_create(
+ struct dc_context *ctx);
+
+#endif /* __DAL_I2C_AUX_DIAG_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
new file mode 100644
index 000000000000..33de8a8834dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_ENGINE_H__
+#define __DAL_ENGINE_H__
+
+enum i2caux_transaction_operation {
+ I2CAUX_TRANSACTION_READ,
+ I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+ enum i2caux_transaction_address_space address_space;
+ uint32_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+ I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW
+};
+
+struct i2caux_transaction_request {
+ enum i2caux_transaction_operation operation;
+ struct i2caux_transaction_payload payload;
+ enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+ I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+ I2CAUX_ENGINE_TYPE_AUX,
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+ I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
+enum i2caux_transaction_action {
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE = 0x00,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ = 0x10,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST = 0x20,
+
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT = 0x40,
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT = 0x50,
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT = 0x60,
+
+ I2CAUX_TRANSACTION_ACTION_DP_WRITE = 0x80,
+ I2CAUX_TRANSACTION_ACTION_DP_READ = 0x90
+};
+
+struct engine;
+
+struct engine_funcs {
+ enum i2caux_engine_type (*get_engine_type)(
+ const struct engine *engine);
+ bool (*acquire)(
+ struct engine *engine,
+ struct ddc *ddc);
+ bool (*submit_request)(
+ struct engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+ void (*release_engine)(
+ struct engine *engine);
+};
+
+struct engine {
+ const struct engine_funcs *funcs;
+ struct ddc *ddc;
+ struct dc_context *ctx;
+};
+
+void dal_i2caux_construct_engine(
+ struct engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2caux_destruct_engine(
+ struct engine *engine);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c b/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c
new file mode 100644
index 000000000000..5d155d36d353
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine_base.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "engine.h"
+
+void dal_i2caux_construct_engine(
+ struct engine *engine,
+ struct dc_context *ctx)
+{
+ engine->ddc = NULL;
+ engine->ctx = ctx;
+}
+
+void dal_i2caux_destruct_engine(
+ struct engine *engine)
+{
+ /* nothing to do */
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
new file mode 100644
index 000000000000..70e20bd47ce4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+#define FROM_ENGINE(ptr) \
+ container_of((ptr), struct i2c_engine, base)
+
+bool dal_i2c_engine_acquire(
+ struct engine *engine,
+ struct ddc *ddc_handle)
+{
+ struct i2c_engine *i2c_engine = FROM_ENGINE(engine);
+
+ uint32_t counter = 0;
+ bool result;
+
+ do {
+ result = i2c_engine->funcs->acquire_engine(
+ i2c_engine, ddc_handle);
+
+ if (result)
+ break;
+
+ /* the i2c_engine is held busy by the VBIOS; wait and retry */
+
+ udelay(10);
+
+ ++counter;
+ } while (counter < 2);
+
+ if (result) {
+ if (!i2c_engine->funcs->setup_engine(i2c_engine)) {
+ engine->funcs->release_engine(engine);
+ result = false;
+ }
+ }
+
+ return result;
+}
+
+bool dal_i2c_engine_setup_i2c_engine(
+ struct i2c_engine *engine)
+{
+ /* Derived engines do not have to override this */
+
+ return true;
+}
+
+void dal_i2c_engine_submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request)
+{
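+ /* default no-op; engines that talk to a real channel provide
+ * their own implementation through i2c_engine_funcs */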
+
+}
+
+void dal_i2c_engine_process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply)
+{
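+ /* default no-op; the SW engine reuses this because it copies read
+ * data directly while handling submit_channel_request */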
+
+}
+
+void dal_i2c_engine_construct(
+ struct i2c_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2caux_construct_engine(&engine->base, ctx);
+ engine->timeout_delay = 0;
+}
+
+void dal_i2c_engine_destruct(
+ struct i2c_engine *engine)
+{
+ dal_i2caux_destruct_engine(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
new file mode 100644
index 000000000000..58fc0f25eceb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_engine.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_ENGINE_H__
+#define __DAL_I2C_ENGINE_H__
+
+enum i2c_channel_operation_result {
+ I2C_CHANNEL_OPERATION_SUCCEEDED,
+ I2C_CHANNEL_OPERATION_FAILED,
+ I2C_CHANNEL_OPERATION_NOT_GRANTED,
+ I2C_CHANNEL_OPERATION_IS_BUSY,
+ I2C_CHANNEL_OPERATION_NO_HANDLE_PROVIDED,
+ I2C_CHANNEL_OPERATION_CHANNEL_IN_USE,
+ I2C_CHANNEL_OPERATION_CHANNEL_CLIENT_MAX_ALLOWED,
+ I2C_CHANNEL_OPERATION_ENGINE_BUSY,
+ I2C_CHANNEL_OPERATION_TIMEOUT,
+ I2C_CHANNEL_OPERATION_NO_RESPONSE,
+ I2C_CHANNEL_OPERATION_HW_REQUEST_I2C_BUS,
+ I2C_CHANNEL_OPERATION_WRONG_PARAMETER,
+ I2C_CHANNEL_OPERATION_OUT_NB_OF_RETRIES,
+ I2C_CHANNEL_OPERATION_NOT_STARTED
+};
+
+struct i2c_request_transaction_data {
+ enum i2caux_transaction_action action;
+ enum i2c_channel_operation_result status;
+ uint8_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+struct i2c_reply_transaction_data {
+ uint32_t length;
+ uint8_t *data;
+};
+
+struct i2c_engine;
+
+struct i2c_engine_funcs {
+ void (*destroy)(
+ struct i2c_engine **ptr);
+ uint32_t (*get_speed)(
+ const struct i2c_engine *engine);
+ void (*set_speed)(
+ struct i2c_engine *engine,
+ uint32_t speed);
+ bool (*acquire_engine)(
+ struct i2c_engine *engine,
+ struct ddc *ddc);
+ bool (*setup_engine)(
+ struct i2c_engine *engine);
+ void (*submit_channel_request)(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply);
+ enum i2c_channel_operation_result (*get_channel_status)(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes);
+};
+
+struct i2c_engine {
+ struct engine base;
+ const struct i2c_engine_funcs *funcs;
+ uint32_t timeout_delay;
+};
+
+void dal_i2c_engine_construct(
+ struct i2c_engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2c_engine_destruct(
+ struct i2c_engine *engine);
+
+bool dal_i2c_engine_setup_i2c_engine(
+ struct i2c_engine *engine);
+
+void dal_i2c_engine_submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *request);
+
+void dal_i2c_engine_process_channel_reply(
+ struct i2c_engine *engine,
+ struct i2c_reply_transaction_data *reply);
+
+bool dal_i2c_engine_acquire(
+ struct engine *ptr,
+ struct ddc *ddc_handle);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
new file mode 100644
index 000000000000..5a4295e0fae5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.c
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+#include "i2c_engine.h"
+#include "i2c_hw_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_generic_hw_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct i2c_hw_engine *'
+ * to 'struct i2c_generic_hw_engine *'
+ */
+#define FROM_I2C_HW_ENGINE(ptr) \
+ container_of((ptr), struct i2c_generic_hw_engine, base)
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_generic_hw_engine *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ FROM_I2C_HW_ENGINE(container_of((ptr), struct i2c_hw_engine, base))
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_generic_hw_engine *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW;
+}
+
+/*
+ * @brief
+ * Single transaction handling.
+ * Since the transaction may be bigger than the HW buffer size,
+ * it is divided into sub-transactions
+ * that use the batch transaction feature of the engine.
+ */
+bool dal_i2c_generic_hw_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction)
+{
+ struct i2c_generic_hw_engine *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_hw_engine *base = &hw_engine->base;
+
+ uint32_t max_payload_size =
+ base->funcs->get_hw_buffer_available_size(base);
+
+ bool initial_stop_bit = !middle_of_transaction;
+
+ struct i2c_generic_transaction_attributes attributes;
+
+ enum i2c_channel_operation_result operation_result =
+ I2C_CHANNEL_OPERATION_FAILED;
+
+ bool result = false;
+
+ /* setup transaction initial properties */
+
+ uint8_t address = i2caux_request->payload.address;
+ uint8_t *current_payload = i2caux_request->payload.data;
+ uint32_t remaining_payload_size = i2caux_request->payload.length;
+
+ bool first_iteration = true;
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ attributes.action = I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+ return false;
+ }
+
+ /* Do batch transaction.
+ * Divide read/write data into payloads which fit HW buffer size.
+ * 1. Single transaction:
+ * start_bit = 1, stop_bit depends on session state, ack_on_read = 0;
+ * 2. Start of batch transaction:
+ * start_bit = 1, stop_bit = 0, ack_on_read = 1;
+ * 3. Middle of batch transaction:
+ * start_bit = 0, stop_bit = 0, ack_on_read = 1;
+ * 4. End of batch transaction:
+ * start_bit = 0, stop_bit depends on session state, ack_on_read = 0.
+ * Session stop bit is set if 'middle_of_transaction' = 0. */
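+ /* Illustrative example (the buffer size here is hypothetical): with a
+ * 16-byte HW buffer and a 20-byte write, the first sub-transaction
+ * carries the address plus 15 data bytes and the second carries the
+ * remaining 5 data bytes. */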
+
+ while (remaining_payload_size) {
+ uint32_t current_transaction_size;
+ uint32_t current_payload_size;
+
+ bool last_iteration;
+ bool stop_bit;
+
+ /* Calculate the current transaction size and payload size.
+ * Transaction size = total number of bytes in the transaction,
+ * including the slave's address;
+ * Payload size = number of data bytes in the transaction. */
+
+ if (first_iteration) {
+ /* In the first sub-transaction we send the slave's address,
+ * so we need to reserve one byte for it */
+ current_transaction_size =
+ (remaining_payload_size > max_payload_size - 1) ?
+ max_payload_size :
+ remaining_payload_size + 1;
+
+ current_payload_size = current_transaction_size - 1;
+ } else {
+ /* The second and further sub-transactions have
+ * the entire buffer reserved for data */
+ current_transaction_size =
+ (remaining_payload_size > max_payload_size) ?
+ max_payload_size :
+ remaining_payload_size;
+
+ current_payload_size = current_transaction_size;
+ }
+
+ last_iteration =
+ (remaining_payload_size == current_payload_size);
+
+ stop_bit = last_iteration ? initial_stop_bit : false;
+
+ /* write slave device address */
+
+ if (first_iteration)
+ hw_engine->funcs->write_address(hw_engine, address);
+
+ /* write current portion of data, if requested */
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ hw_engine->funcs->write_data(
+ hw_engine,
+ current_payload,
+ current_payload_size);
+
+ /* execute transaction */
+
+ attributes.start_bit = first_iteration;
+ attributes.stop_bit = stop_bit;
+ attributes.last_read = last_iteration;
+ attributes.transaction_size = current_transaction_size;
+
+ hw_engine->funcs->execute_transaction(hw_engine, &attributes);
+
+ /* wait until transaction is processed; if it fails - quit */
+
+ operation_result = base->funcs->wait_on_operation_result(
+ base,
+ base->funcs->get_transaction_timeout(
+ base, current_transaction_size),
+ I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+
+ if (operation_result != I2C_CHANNEL_OPERATION_SUCCEEDED)
+ break;
+
+ /* read current portion of data, if requested */
+
+ /* the read offset is 1 for the first sub-transaction
+ * and 0 for any subsequent one */
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ hw_engine->funcs->read_data(hw_engine, current_payload,
+ current_payload_size, first_iteration ? 1 : 0);
+
+ /* update loop variables */
+
+ first_iteration = false;
+ current_payload += current_payload_size;
+ remaining_payload_size -= current_payload_size;
+ }
+
+ /* update transaction status */
+
+ switch (operation_result) {
+ case I2C_CHANNEL_OPERATION_SUCCEEDED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ result = true;
+ break;
+ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ break;
+ case I2C_CHANNEL_OPERATION_TIMEOUT:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ break;
+ case I2C_CHANNEL_OPERATION_FAILED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+ break;
+ default:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
+ }
+
+ return result;
+}
+
+/*
+ * @brief
+ * Returns the number of microseconds after which the transaction
+ * is considered to have timed out
+ */
+uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length)
+{
+ const struct i2c_engine *base = &engine->base;
+
+ uint32_t speed = base->funcs->get_speed(base);
+
+ if (!speed)
+ return 0;
+
+ /* total timeout = period_timeout * (start + data bits count + stop) */
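+ /* Worked example (values are illustrative): at the default 50 kHz and
+ * length = 2 this evaluates to
+ * (1000 * 32 / 50) * (1 + 16 + 1) = 640 * 18 = 11520 microseconds. */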
+
+ return ((1000 * TRANSACTION_TIMEOUT_IN_I2C_CLOCKS) / speed) *
+ (1 + (length << 3) + 1);
+}
+
+void dal_i2c_generic_hw_engine_construct(
+ struct i2c_generic_hw_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2c_hw_engine_construct(&engine->base, ctx);
+}
+
+void dal_i2c_generic_hw_engine_destruct(
+ struct i2c_generic_hw_engine *engine)
+{
+ dal_i2c_hw_engine_destruct(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
new file mode 100644
index 000000000000..1da0397b04a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_generic_hw_engine.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_GENERIC_HW_ENGINE_H__
+#define __DAL_I2C_GENERIC_HW_ENGINE_H__
+
+struct i2c_generic_transaction_attributes {
+ enum i2caux_transaction_action action;
+ uint32_t transaction_size;
+ bool start_bit;
+ bool stop_bit;
+ bool last_read;
+};
+
+struct i2c_generic_hw_engine;
+
+struct i2c_generic_hw_engine_funcs {
+ void (*write_address)(
+ struct i2c_generic_hw_engine *engine,
+ uint8_t address);
+ void (*write_data)(
+ struct i2c_generic_hw_engine *engine,
+ const uint8_t *buffer,
+ uint32_t length);
+ void (*read_data)(
+ struct i2c_generic_hw_engine *engine,
+ uint8_t *buffer,
+ uint32_t length,
+ uint32_t offset);
+ void (*execute_transaction)(
+ struct i2c_generic_hw_engine *engine,
+ struct i2c_generic_transaction_attributes *attributes);
+};
+
+struct i2c_generic_hw_engine {
+ struct i2c_hw_engine base;
+ const struct i2c_generic_hw_engine_funcs *funcs;
+};
+
+void dal_i2c_generic_hw_engine_construct(
+ struct i2c_generic_hw_engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2c_generic_hw_engine_destruct(
+ struct i2c_generic_hw_engine *engine);
+enum i2caux_engine_type dal_i2c_generic_hw_engine_get_engine_type(
+ const struct engine *engine);
+bool dal_i2c_generic_hw_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction);
+uint32_t dal_i2c_generic_hw_engine_get_transaction_timeout(
+ const struct i2c_hw_engine *engine,
+ uint32_t length);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
new file mode 100644
index 000000000000..4b54fcfb28ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+#include "i2c_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_hw_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_hw_engine *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ container_of((ptr), struct i2c_hw_engine, base)
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_hw_engine *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_I2C_DDC_HW;
+}
+
+bool dal_i2c_hw_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction)
+{
+ struct i2c_hw_engine *hw_engine = FROM_ENGINE(engine);
+
+ struct i2c_request_transaction_data request;
+
+ uint32_t transaction_timeout;
+
+ enum i2c_channel_operation_result operation_result;
+
+ bool result = false;
+
+ /* The transaction length must not exceed the number of
+ * free bytes in the HW buffer (minus one byte for the address) */
+
+ if (i2caux_request->payload.length >=
+ hw_engine->funcs->get_hw_buffer_available_size(hw_engine)) {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW;
+ return false;
+ }
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+ /* [anaumov] in DAL2, there was no "return false" */
+ return false;
+ }
+
+ request.address = (uint8_t)i2caux_request->payload.address;
+ request.length = i2caux_request->payload.length;
+ request.data = i2caux_request->payload.data;
+
+ /* obtain timeout value before submitting request */
+
+ transaction_timeout = hw_engine->funcs->get_transaction_timeout(
+ hw_engine, i2caux_request->payload.length + 1);
+
+ hw_engine->base.funcs->submit_channel_request(
+ &hw_engine->base, &request);
+
+ if ((request.status == I2C_CHANNEL_OPERATION_FAILED) ||
+ (request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY)) {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
+ return false;
+ }
+
+ /* wait until the transaction completes */
+
+ operation_result = hw_engine->funcs->wait_on_operation_result(
+ hw_engine,
+ transaction_timeout,
+ I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+
+ /* update transaction status */
+
+ switch (operation_result) {
+ case I2C_CHANNEL_OPERATION_SUCCEEDED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ result = true;
+ break;
+ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ break;
+ case I2C_CHANNEL_OPERATION_TIMEOUT:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ break;
+ case I2C_CHANNEL_OPERATION_FAILED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+ break;
+ default:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
+ }
+
+ if (result && (i2caux_request->operation == I2CAUX_TRANSACTION_READ)) {
+ struct i2c_reply_transaction_data reply;
+
+ reply.data = i2caux_request->payload.data;
+ reply.length = i2caux_request->payload.length;
+
+ hw_engine->base.funcs->
+ process_channel_reply(&hw_engine->base, &reply);
+ }
+
+ return result;
+}
+
+bool dal_i2c_hw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc)
+{
+ enum gpio_result result;
+ uint32_t current_speed;
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ engine->base.ddc = ddc;
+
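+ /* cache the speed the HW engine is currently programmed to
+ * as the 'original' speed */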
+ current_speed = engine->funcs->get_speed(engine);
+
+ if (current_speed)
+ FROM_I2C_ENGINE(engine)->original_speed = current_speed;
+
+ return true;
+}
+/*
+ * @brief
+ * Polls the engine status in a loop
+ * until it no longer matches 'expected_result', or timeout occurs.
+ * The timeout is given in microseconds
+ * and the status is queried roughly once per microsecond.
+ */
+enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
+ struct i2c_hw_engine *engine,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result)
+{
+ enum i2c_channel_operation_result result;
+ uint32_t i = 0;
+
+ if (!timeout)
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+
+ do {
+ result = engine->base.funcs->get_channel_status(
+ &engine->base, NULL);
+
+ if (result != expected_result)
+ break;
+
+ udelay(1);
+
+ ++i;
+ } while (i < timeout);
+
+ return result;
+}
+
+void dal_i2c_hw_engine_construct(
+ struct i2c_hw_engine *engine,
+ struct dc_context *ctx)
+{
+ dal_i2c_engine_construct(&engine->base, ctx);
+ engine->original_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
+ engine->default_speed = I2CAUX_DEFAULT_I2C_HW_SPEED;
+}
+
+void dal_i2c_hw_engine_destruct(
+ struct i2c_hw_engine *engine)
+{
+ dal_i2c_engine_destruct(&engine->base);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
new file mode 100644
index 000000000000..8936a994804a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_hw_engine.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_HW_ENGINE_H__
+#define __DAL_I2C_HW_ENGINE_H__
+
+enum {
+ TRANSACTION_TIMEOUT_IN_I2C_CLOCKS = 32
+};
+
+struct i2c_hw_engine;
+
+struct i2c_hw_engine_funcs {
+ uint32_t (*get_hw_buffer_available_size)(
+ const struct i2c_hw_engine *engine);
+ enum i2c_channel_operation_result (*wait_on_operation_result)(
+ struct i2c_hw_engine *engine,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result);
+ uint32_t (*get_transaction_timeout)(
+ const struct i2c_hw_engine *engine,
+ uint32_t length);
+};
+
+struct i2c_hw_engine {
+ struct i2c_engine base;
+ const struct i2c_hw_engine_funcs *funcs;
+
+ /* Values below are in kilohertz */
+ uint32_t original_speed;
+ uint32_t default_speed;
+};
+
+void dal_i2c_hw_engine_construct(
+ struct i2c_hw_engine *engine,
+ struct dc_context *ctx);
+
+void dal_i2c_hw_engine_destruct(
+ struct i2c_hw_engine *engine);
+
+enum i2c_channel_operation_result dal_i2c_hw_engine_wait_on_operation_result(
+ struct i2c_hw_engine *engine,
+ uint32_t timeout,
+ enum i2c_channel_operation_result expected_result);
+
+bool dal_i2c_hw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc);
+
+bool dal_i2c_hw_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction);
+
+enum i2caux_engine_type dal_i2c_hw_engine_get_engine_type(
+ const struct engine *engine);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
new file mode 100644
index 000000000000..8e19bb629394
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.c
@@ -0,0 +1,601 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "engine.h"
+#include "i2c_engine.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2c_sw_engine.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+/*
+ * This unit
+ */
+
+#define SCL false
+#define SDA true
+
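+/* The helpers below select which DDC GPIO to drive or sample via the
+ * 'data_nor_clock' flag: true means the SDA (data) line,
+ * false means the SCL (clock) line. */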
+static inline bool read_bit_from_ddc(
+ struct ddc *ddc,
+ bool data_nor_clock)
+{
+ uint32_t value = 0;
+
+ if (data_nor_clock)
+ dal_gpio_get_value(ddc->pin_data, &value);
+ else
+ dal_gpio_get_value(ddc->pin_clock, &value);
+
+ return (value != 0);
+}
+
+static inline void write_bit_to_ddc(
+ struct ddc *ddc,
+ bool data_nor_clock,
+ bool bit)
+{
+ uint32_t value = bit ? 1 : 0;
+
+ if (data_nor_clock)
+ dal_gpio_set_value(ddc->pin_data, value);
+ else
+ dal_gpio_set_value(ddc->pin_clock, value);
+}
+
+static bool wait_for_scl_high(
+ struct dc_context *ctx,
+ struct ddc *ddc,
+ uint16_t clock_delay_div_4)
+{
+ uint32_t scl_retry = 0;
+ uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4;
+
+ udelay(clock_delay_div_4);
+
+ /* Poll SCL for up to ~3 milliseconds (I2C_SW_TIMEOUT_DELAY)
+ * to give some displays time to wake up from "low power" state.
+ */
+
+ do {
+ if (read_bit_from_ddc(ddc, SCL))
+ return true;
+
+ udelay(clock_delay_div_4);
+
+ ++scl_retry;
+ } while (scl_retry <= scl_retry_max);
+
+ return false;
+}
+
+static bool start_sync(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4)
+{
+ uint32_t retry = 0;
+
+ /* The I2C start condition is:
+ * a high-to-low transition on SDA while SCL is high. */
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ udelay(clock_delay_div_4);
+
+ do {
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
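+ /* SDA should read back high once released;
+ * if it does not, the bus is still held low, so retry */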
+ if (!read_bit_from_ddc(ddc_handle, SDA)) {
+ ++retry;
+ continue;
+ }
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ break;
+
+ write_bit_to_ddc(ddc_handle, SDA, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4);
+
+ return true;
+ } while (retry <= I2C_SW_RETRIES);
+
+ return false;
+}
+
+static bool stop_sync(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4)
+{
+ uint32_t retry = 0;
+
+ /* The I2C stop condition is:
+ * a low-to-high transition on SDA while SCL is high. */
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ do {
+ udelay(clock_delay_div_4);
+
+ if (read_bit_from_ddc(ddc_handle, SDA))
+ return true;
+
+ ++retry;
+ } while (retry <= 2);
+
+ return false;
+}
+
+static bool write_byte(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t byte)
+{
+ int32_t shift = 7;
+ bool ack;
+
+ /* bits are transmitted serially, starting from MSB */
+
+ do {
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, (byte >> shift) & 1);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ --shift;
+ } while (shift >= 0);
+
+ /* The display acknowledges by holding SDA low during the clock pulse
+ * that follows our last data bit.
+ * If SDA reads high during that clock, it is a NACK */
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ /* read ACK bit */
+
+ ack = !read_bit_from_ddc(ddc_handle, SDA);
+
+ udelay(clock_delay_div_4 << 1);
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4 << 1);
+
+ return ack;
+}
+
+static bool read_byte(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t *byte,
+ bool more)
+{
+ int32_t shift = 7;
+
+ uint8_t data = 0;
+
+ /* The data bits are read from MSB to LSB;
+ * each bit is sampled while SCL is high */
+
+ do {
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ if (read_bit_from_ddc(ddc_handle, SDA))
+ data |= (1 << shift);
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4 << 1);
+
+ --shift;
+ } while (shift >= 0);
+
+ /* the whole byte has been assembled; return it to the caller */
+
+ *byte = data;
+
+ udelay(clock_delay_div_4);
+
+ /* send the acknowledge bit:
+ * SDA low means ACK, SDA high means NACK */
+
+ write_bit_to_ddc(ddc_handle, SDA, !more);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SCL, true);
+
+ if (!wait_for_scl_high(ctx, ddc_handle, clock_delay_div_4))
+ return false;
+
+ write_bit_to_ddc(ddc_handle, SCL, false);
+
+ udelay(clock_delay_div_4);
+
+ write_bit_to_ddc(ddc_handle, SDA, true);
+
+ udelay(clock_delay_div_4);
+
+ return true;
+}
+
+static bool i2c_write(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t address,
+ uint32_t length,
+ const uint8_t *data)
+{
+ uint32_t i = 0;
+
+ if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
+ return false;
+
+ while (i < length) {
+ if (!write_byte(ctx, ddc_handle, clock_delay_div_4, data[i]))
+ return false;
+ ++i;
+ }
+
+ return true;
+}
+
+static bool i2c_read(
+ struct dc_context *ctx,
+ struct ddc *ddc_handle,
+ uint16_t clock_delay_div_4,
+ uint8_t address,
+ uint32_t length,
+ uint8_t *data)
+{
+ uint32_t i = 0;
+
+ if (!write_byte(ctx, ddc_handle, clock_delay_div_4, address))
+ return false;
+
+ while (i < length) {
+ if (!read_byte(ctx, ddc_handle, clock_delay_div_4, data + i,
+ i < length - 1))
+ return false;
+ ++i;
+ }
+
+ return true;
+}
+
+/*
+ * @brief
+ * Cast 'struct i2c_engine *'
+ * to 'struct i2c_sw_engine *'
+ */
+#define FROM_I2C_ENGINE(ptr) \
+ container_of((ptr), struct i2c_sw_engine, base)
+
+/*
+ * @brief
+ * Cast 'struct engine *'
+ * to 'struct i2c_sw_engine *'
+ */
+#define FROM_ENGINE(ptr) \
+ FROM_I2C_ENGINE(container_of((ptr), struct i2c_engine, base))
+
+enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
+ const struct engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_I2C_SW;
+}
+
+bool dal_i2c_sw_engine_submit_request(
+ struct engine *engine,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction)
+{
+ struct i2c_sw_engine *sw_engine = FROM_ENGINE(engine);
+
+ struct i2c_engine *base = &sw_engine->base;
+
+ struct i2c_request_transaction_data request;
+ bool operation_succeeded = false;
+
+ if (i2caux_request->operation == I2CAUX_TRANSACTION_READ)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ else if (i2caux_request->operation == I2CAUX_TRANSACTION_WRITE)
+ request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else {
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION;
+ /* in DAL2, there was no "return false" */
+ return false;
+ }
+
+ request.address = (uint8_t)i2caux_request->payload.address;
+ request.length = i2caux_request->payload.length;
+ request.data = i2caux_request->payload.data;
+
+ base->funcs->submit_channel_request(base, &request);
+
+ if ((request.status == I2C_CHANNEL_OPERATION_ENGINE_BUSY) ||
+ (request.status == I2C_CHANNEL_OPERATION_FAILED))
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY;
+ else {
+ enum i2c_channel_operation_result operation_result;
+
+ do {
+ operation_result =
+ base->funcs->get_channel_status(base, NULL);
+
+ switch (operation_result) {
+ case I2C_CHANNEL_OPERATION_SUCCEEDED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ operation_succeeded = true;
+ break;
+ case I2C_CHANNEL_OPERATION_NO_RESPONSE:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ break;
+ case I2C_CHANNEL_OPERATION_TIMEOUT:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ break;
+ case I2C_CHANNEL_OPERATION_FAILED:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE;
+ break;
+ default:
+ i2caux_request->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION;
+ break;
+ }
+ } while (operation_result == I2C_CHANNEL_OPERATION_ENGINE_BUSY);
+ }
+
+ return operation_succeeded;
+}
+
+uint32_t dal_i2c_sw_engine_get_speed(
+ const struct i2c_engine *engine)
+{
+ return FROM_I2C_ENGINE(engine)->speed;
+}
+
+void dal_i2c_sw_engine_set_speed(
+ struct i2c_engine *engine,
+ uint32_t speed)
+{
+ struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
+
+ ASSERT(speed);
+
+ sw_engine->speed = speed ? speed : I2CAUX_DEFAULT_I2C_SW_SPEED;
+
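+ /* speed is in kHz, so 1000 / speed gives the SCL period in
+ * microseconds; it is clamped to at least 12 us below */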
+ sw_engine->clock_delay = 1000 / sw_engine->speed;
+
+ if (sw_engine->clock_delay < 12)
+ sw_engine->clock_delay = 12;
+}
+
+bool dal_i2caux_i2c_sw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc)
+{
+ enum gpio_result result;
+
+ result = dal_ddc_open(ddc, GPIO_MODE_FAST_OUTPUT,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ engine->base.ddc = ddc;
+
+ return true;
+}
+
+void dal_i2c_sw_engine_submit_channel_request(
+ struct i2c_engine *engine,
+ struct i2c_request_transaction_data *req)
+{
+ struct i2c_sw_engine *sw_engine = FROM_I2C_ENGINE(engine);
+
+ struct ddc *ddc = engine->base.ddc;
+ uint16_t clock_delay_div_4 = sw_engine->clock_delay >> 2;
+
+ /* send sync (start / repeated start) */
+
+ bool result = start_sync(engine->base.ctx, ddc, clock_delay_div_4);
+
+ /* process payload */
+
+ if (result) {
+ switch (req->action) {
+ case I2CAUX_TRANSACTION_ACTION_I2C_WRITE:
+ case I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT:
+ result = i2c_write(engine->base.ctx, ddc, clock_delay_div_4,
+ req->address, req->length, req->data);
+ break;
+ case I2CAUX_TRANSACTION_ACTION_I2C_READ:
+ case I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT:
+ result = i2c_read(engine->base.ctx, ddc, clock_delay_div_4,
+ req->address, req->length, req->data);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ }
+
+ /* send stop if not 'mot' or operation failed */
+
+ if (!result ||
+ (req->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (req->action == I2CAUX_TRANSACTION_ACTION_I2C_READ))
+ if (!stop_sync(engine->base.ctx, ddc, clock_delay_div_4))
+ result = false;
+
+ req->status = result ?
+ I2C_CHANNEL_OPERATION_SUCCEEDED :
+ I2C_CHANNEL_OPERATION_FAILED;
+}
+
+enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes)
+{
+ /* No arbitration with VBIOS is performed since DCE 6.0 */
+ return I2C_CHANNEL_OPERATION_SUCCEEDED;
+}
+
+void dal_i2c_sw_engine_destruct(
+ struct i2c_sw_engine *engine)
+{
+ dal_i2c_engine_destruct(&engine->base);
+}
+
+static void destroy(
+ struct i2c_engine **ptr)
+{
+ dal_i2c_sw_engine_destruct(FROM_I2C_ENGINE(*ptr));
+
+ kfree(*ptr);
+ *ptr = NULL;
+}
+
+static const struct i2c_engine_funcs i2c_engine_funcs = {
+ .acquire_engine = dal_i2caux_i2c_sw_engine_acquire_engine,
+ .destroy = destroy,
+ .get_speed = dal_i2c_sw_engine_get_speed,
+ .set_speed = dal_i2c_sw_engine_set_speed,
+ .setup_engine = dal_i2c_engine_setup_i2c_engine,
+ .submit_channel_request = dal_i2c_sw_engine_submit_channel_request,
+ .process_channel_reply = dal_i2c_engine_process_channel_reply,
+ .get_channel_status = dal_i2c_sw_engine_get_channel_status,
+};
+
+static void release_engine(
+ struct engine *engine)
+{
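+ /* nothing engine-specific to release; dal_i2caux_release_engine()
+ * closes the DDC pins after calling this hook */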
+
+}
+
+static const struct engine_funcs engine_funcs = {
+ .release_engine = release_engine,
+ .get_engine_type = dal_i2c_sw_engine_get_engine_type,
+ .acquire = dal_i2c_engine_acquire,
+ .submit_request = dal_i2c_sw_engine_submit_request,
+};
+
+void dal_i2c_sw_engine_construct(
+ struct i2c_sw_engine *engine,
+ const struct i2c_sw_engine_create_arg *arg)
+{
+ dal_i2c_engine_construct(&engine->base, arg->ctx);
+ dal_i2c_sw_engine_set_speed(&engine->base, arg->default_speed);
+ engine->base.funcs = &i2c_engine_funcs;
+ engine->base.base.funcs = &engine_funcs;
+}
+
+struct i2c_engine *dal_i2c_sw_engine_create(
+ const struct i2c_sw_engine_create_arg *arg)
+{
+ struct i2c_sw_engine *engine;
+
+ if (!arg) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ engine = kzalloc(sizeof(struct i2c_sw_engine), GFP_KERNEL);
+
+ if (!engine) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dal_i2c_sw_engine_construct(engine, arg);
+ return &engine->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
new file mode 100644
index 000000000000..546f15b0d3f1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2c_sw_engine.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_SW_ENGINE_H__
+#define __DAL_I2C_SW_ENGINE_H__
+
+enum {
+ I2C_SW_RETRIES = 10,
+ I2C_SW_SCL_READ_RETRIES = 128,
+ /* following value is in microseconds */
+ I2C_SW_TIMEOUT_DELAY = 3000
+};
+
+struct i2c_sw_engine;
+
+struct i2c_sw_engine {
+ struct i2c_engine base;
+ uint32_t clock_delay;
+ /* Values below are in KHz */
+ uint32_t speed;
+ uint32_t default_speed;
+};
+
+struct i2c_sw_engine_create_arg {
+ uint32_t default_speed;
+ struct dc_context *ctx;
+};
+
+void dal_i2c_sw_engine_construct(
+ struct i2c_sw_engine *engine,
+ const struct i2c_sw_engine_create_arg *arg);
+
+bool dal_i2caux_i2c_sw_engine_acquire_engine(
+ struct i2c_engine *engine,
+ struct ddc *ddc_handle);
+
+void dal_i2c_sw_engine_destruct(
+ struct i2c_sw_engine *engine);
+
+struct i2c_engine *dal_i2c_sw_engine_create(
+ const struct i2c_sw_engine_create_arg *arg);
+enum i2caux_engine_type dal_i2c_sw_engine_get_engine_type(
+ const struct engine *engine);
+bool dal_i2c_sw_engine_submit_request(
+ struct engine *ptr,
+ struct i2caux_transaction_request *i2caux_request,
+ bool middle_of_transaction);
+uint32_t dal_i2c_sw_engine_get_speed(
+ const struct i2c_engine *engine);
+void dal_i2c_sw_engine_set_speed(
+ struct i2c_engine *ptr,
+ uint32_t speed);
+void dal_i2c_sw_engine_submit_channel_request(
+ struct i2c_engine *ptr,
+ struct i2c_request_transaction_data *req);
+enum i2c_channel_operation_result dal_i2c_sw_engine_get_channel_status(
+ struct i2c_engine *engine,
+ uint8_t *returned_bytes);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
new file mode 100644
index 000000000000..e1593ffe5a2b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+/*
+ * Pre-requisites: headers required by header of this unit
+ */
+#include "include/i2caux_interface.h"
+#include "dc_bios_types.h"
+
+/*
+ * Header of this unit
+ */
+
+#include "i2caux.h"
+
+/*
+ * Post-requisites: headers required by this unit
+ */
+
+#include "engine.h"
+#include "i2c_engine.h"
+#include "aux_engine.h"
+
+/*
+ * This unit
+ */
+
+#include "dce80/i2caux_dce80.h"
+
+#include "dce100/i2caux_dce100.h"
+
+#include "dce110/i2caux_dce110.h"
+
+#include "dce112/i2caux_dce112.h"
+
+#include "dce120/i2caux_dce120.h"
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/i2caux_dcn10.h"
+#endif
+
+#include "diagnostics/i2caux_diag.h"
+
+/*
+ * @brief
+ * Plain API, available publicly
+ */
+
+struct i2caux *dal_i2caux_create(
+ struct dc_context *ctx)
+{
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ return dal_i2caux_diag_fpga_create(ctx);
+ }
+
+ switch (ctx->dce_version) {
+ case DCE_VERSION_8_0:
+ case DCE_VERSION_8_1:
+ case DCE_VERSION_8_3:
+ return dal_i2caux_dce80_create(ctx);
+ case DCE_VERSION_11_2:
+ return dal_i2caux_dce112_create(ctx);
+ case DCE_VERSION_11_0:
+ return dal_i2caux_dce110_create(ctx);
+ case DCE_VERSION_10_0:
+ return dal_i2caux_dce100_create(ctx);
+ case DCE_VERSION_12_0:
+ return dal_i2caux_dce120_create(ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ case DCN_VERSION_1_0:
+ return dal_i2caux_dcn10_create(ctx);
+#endif
+
+ default:
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+bool dal_i2caux_submit_i2c_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct i2c_command *cmd)
+{
+ struct i2c_engine *engine;
+ uint8_t index_of_payload = 0;
+ bool result;
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!cmd) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ /*
+ * The default engine is SW; however, a feature flag in the adapter
+ * service determines whether a SW i2c_engine is available at all.
+ * If SW i2c is not available we fall back to HW. Currently this
+ * flag is set so that a SW i2c engine is created only for DCE 8.0.
+ */
+ switch (cmd->engine) {
+ case I2C_COMMAND_ENGINE_DEFAULT:
+ case I2C_COMMAND_ENGINE_SW:
+ /* try to acquire SW engine first,
+ * acquire HW engine if SW engine not available */
+ engine = i2caux->funcs->acquire_i2c_sw_engine(i2caux, ddc);
+
+ if (!engine)
+ engine = i2caux->funcs->acquire_i2c_hw_engine(
+ i2caux, ddc);
+ break;
+ case I2C_COMMAND_ENGINE_HW:
+ default:
+ /* try to acquire HW engine first,
+ * acquire SW engine if HW engine not available */
+ engine = i2caux->funcs->acquire_i2c_hw_engine(i2caux, ddc);
+
+ if (!engine)
+ engine = i2caux->funcs->acquire_i2c_sw_engine(
+ i2caux, ddc);
+ }
+
+ if (!engine)
+ return false;
+
+ engine->funcs->set_speed(engine, cmd->speed);
+
+ result = true;
+
+ while (index_of_payload < cmd->number_of_payloads) {
+ bool mot = (index_of_payload != cmd->number_of_payloads - 1);
+
+ struct i2c_payload *payload = cmd->payloads + index_of_payload;
+
+ struct i2caux_transaction_request request = { 0 };
+
+ request.operation = payload->write ?
+ I2CAUX_TRANSACTION_WRITE :
+ I2CAUX_TRANSACTION_READ;
+
+ request.payload.address_space =
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
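+ /* form the 8-bit wire address: 7-bit I2C address in bits 7:1,
+ * R/W flag in bit 0 (1 = read) */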
+ request.payload.address = (payload->address << 1) |
+ !payload->write;
+ request.payload.length = payload->length;
+ request.payload.data = payload->data;
+
+ if (!engine->base.funcs->submit_request(
+ &engine->base, &request, mot)) {
+ result = false;
+ break;
+ }
+
+ ++index_of_payload;
+ }
+
+ i2caux->funcs->release_engine(i2caux, &engine->base);
+
+ return result;
+}
+
+bool dal_i2caux_submit_aux_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct aux_command *cmd)
+{
+ struct aux_engine *engine;
+ uint8_t index_of_payload = 0;
+ bool result;
+ bool mot;
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!cmd) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc);
+
+ if (!engine)
+ return false;
+
+ engine->delay = cmd->defer_delay;
+ engine->max_defer_write_retry = cmd->max_defer_write_retry;
+
+ result = true;
+
+ while (index_of_payload < cmd->number_of_payloads) {
+ struct aux_payload *payload = cmd->payloads + index_of_payload;
+ struct i2caux_transaction_request request = { 0 };
+
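+ /* unless the command forces MOT, keep MOT set on every payload
+ * except the last one */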
+ if (cmd->mot == I2C_MOT_UNDEF)
+ mot = (index_of_payload != cmd->number_of_payloads - 1);
+ else
+ mot = (cmd->mot == I2C_MOT_TRUE);
+
+ request.operation = payload->write ?
+ I2CAUX_TRANSACTION_WRITE :
+ I2CAUX_TRANSACTION_READ;
+
+ if (payload->i2c_over_aux) {
+ request.payload.address_space =
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C;
+
+ request.payload.address = (payload->address << 1) |
+ !payload->write;
+ } else {
+ request.payload.address_space =
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD;
+
+ request.payload.address = payload->address;
+ }
+
+ request.payload.length = payload->length;
+ request.payload.data = payload->data;
+
+ if (!engine->base.funcs->submit_request(
+ &engine->base, &request, mot)) {
+ result = false;
+ break;
+ }
+
+ ++index_of_payload;
+ }
+
+ i2caux->funcs->release_engine(i2caux, &engine->base);
+
+ return result;
+}
+
+static bool get_hw_supported_ddc_line(
+ struct ddc *ddc,
+ enum gpio_ddc_line *line)
+{
+ enum gpio_ddc_line line_found;
+
+ *line = GPIO_DDC_LINE_UNKNOWN;
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ if (!ddc->hw_info.hw_supported)
+ return false;
+
+ line_found = dal_ddc_get_line(ddc);
+
+ if (line_found >= GPIO_DDC_LINE_COUNT)
+ return false;
+
+ *line = line_found;
+
+ return true;
+}
+
+void dal_i2caux_configure_aux(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ union aux_config cfg)
+{
+ struct aux_engine *engine =
+ i2caux->funcs->acquire_aux_engine(i2caux, ddc);
+
+ if (!engine)
+ return;
+
+ engine->funcs->configure(engine, cfg);
+
+ i2caux->funcs->release_engine(i2caux, &engine->base);
+}
+
+void dal_i2caux_destroy(
+ struct i2caux **i2caux)
+{
+ if (!i2caux || !*i2caux) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ (*i2caux)->funcs->destroy(i2caux);
+
+ *i2caux = NULL;
+}
+
+/*
+ * @brief
+ * A utility function used by 'struct i2caux' and its descendants
+ */
+
+uint32_t dal_i2caux_get_reference_clock(
+ struct dc_bios *bios)
+{
+ struct dc_firmware_info info = { { 0 } };
+
+ if (bios->funcs->get_firmware_info(bios, &info) != BP_RESULT_OK)
+ return 0;
+
+ return info.pll_info.crystal_frequency;
+}
+
+/*
+ * @brief
+ * i2caux
+ */
+
+enum {
+ /* following are expressed in KHz */
+ DEFAULT_I2C_SW_SPEED = 50,
+ DEFAULT_I2C_HW_SPEED = 50,
+
+ DEFAULT_I2C_SW_SPEED_100KHZ = 100,
+ DEFAULT_I2C_HW_SPEED_100KHZ = 100,
+
+ /* This is the timeout as defined in DP 1.2a,
+ * 2.3.4 "Detailed uPacket TX AUX CH State Description". */
+ AUX_TIMEOUT_PERIOD = 400,
+
+ /* Ideally, the SW timeout should be just above 550usec
+ * which is programmed in HW.
+ * But the SW timeout of 600usec is not reliable,
+ * because on some systems, delay_in_microseconds()
+ * returns faster than it should.
+ * EPR #379763: by trial-and-error on different systems,
+ * 700usec is the minimum reliable SW timeout for polling
+ * the AUX_SW_STATUS.AUX_SW_DONE bit.
+ * This timeout expires *only* when there is
+ * AUX Error or AUX Timeout conditions - not during normal operation.
+ * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
+ * at most within ~240usec. That means,
+ * increasing this timeout will not affect normal operation,
+ * and we'll timeout after
+ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+ * This timeout is especially important for
+ * resume from S3 and CTS. */
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+};
+
+struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ enum gpio_ddc_line line;
+ struct i2c_engine *engine = NULL;
+
+ if (get_hw_supported_ddc_line(ddc, &line))
+ engine = i2caux->i2c_sw_engines[line];
+
+ if (!engine)
+ engine = i2caux->i2c_generic_sw_engine;
+
+ if (!engine)
+ return NULL;
+
+ if (!engine->base.funcs->acquire(&engine->base, ddc))
+ return NULL;
+
+ return engine;
+}
+
+struct aux_engine *dal_i2caux_acquire_aux_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc)
+{
+ enum gpio_ddc_line line;
+ struct aux_engine *engine;
+
+ if (!get_hw_supported_ddc_line(ddc, &line))
+ return NULL;
+
+ engine = i2caux->aux_engines[line];
+
+ if (!engine)
+ return NULL;
+
+ if (!engine->base.funcs->acquire(&engine->base, ddc))
+ return NULL;
+
+ return engine;
+}
+
+void dal_i2caux_release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine)
+{
+ engine->funcs->release_engine(engine);
+
+ dal_ddc_close(engine->ddc);
+
+ engine->ddc = NULL;
+}
+
+void dal_i2caux_construct(
+ struct i2caux *i2caux,
+ struct dc_context *ctx)
+{
+ uint32_t i = 0;
+
+ i2caux->ctx = ctx;
+ do {
+ i2caux->i2c_sw_engines[i] = NULL;
+ i2caux->i2c_hw_engines[i] = NULL;
+ i2caux->aux_engines[i] = NULL;
+
+ ++i;
+ } while (i < GPIO_DDC_LINE_COUNT);
+
+ i2caux->i2c_generic_sw_engine = NULL;
+ i2caux->i2c_generic_hw_engine = NULL;
+
+ i2caux->aux_timeout_period =
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD;
+
+ if (ctx->dce_version >= DCE_VERSION_11_2) {
+ i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED_100KHZ;
+ i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED_100KHZ;
+ } else {
+ i2caux->default_i2c_hw_speed = DEFAULT_I2C_HW_SPEED;
+ i2caux->default_i2c_sw_speed = DEFAULT_I2C_SW_SPEED;
+ }
+}
+
+void dal_i2caux_destruct(
+ struct i2caux *i2caux)
+{
+ uint32_t i = 0;
+
+ if (i2caux->i2c_generic_hw_engine)
+ i2caux->i2c_generic_hw_engine->funcs->destroy(
+ &i2caux->i2c_generic_hw_engine);
+
+ if (i2caux->i2c_generic_sw_engine)
+ i2caux->i2c_generic_sw_engine->funcs->destroy(
+ &i2caux->i2c_generic_sw_engine);
+
+ do {
+ if (i2caux->aux_engines[i])
+ i2caux->aux_engines[i]->funcs->destroy(
+ &i2caux->aux_engines[i]);
+
+ if (i2caux->i2c_hw_engines[i])
+ i2caux->i2c_hw_engines[i]->funcs->destroy(
+ &i2caux->i2c_hw_engines[i]);
+
+ if (i2caux->i2c_sw_engines[i])
+ i2caux->i2c_sw_engines[i]->funcs->destroy(
+ &i2caux->i2c_sw_engines[i]);
+
+ ++i;
+ } while (i < GPIO_DDC_LINE_COUNT);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
new file mode 100644
index 000000000000..64f51bb06915
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2C_AUX_H__
+#define __DAL_I2C_AUX_H__
+
+uint32_t dal_i2caux_get_reference_clock(
+ struct dc_bios *bios);
+
+struct i2caux;
+
+struct engine;
+
+struct i2caux_funcs {
+ void (*destroy)(struct i2caux **ptr);
+ struct i2c_engine * (*acquire_i2c_sw_engine)(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+ struct i2c_engine * (*acquire_i2c_hw_engine)(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+ struct aux_engine * (*acquire_aux_engine)(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+ void (*release_engine)(
+ struct i2caux *i2caux,
+ struct engine *engine);
+};
+
+struct i2c_engine;
+struct aux_engine;
+
+struct i2caux {
+ struct dc_context *ctx;
+ const struct i2caux_funcs *funcs;
+ /* On ASIC we have certain amount of lines with HW DDC engine
+ * (4, 6, or maybe more in the future).
+ * For every such line, we create separate HW DDC engine
+ * (since we have these engines in HW) and separate SW DDC engine
+ * (to allow concurrent use of a few lines).
+ * In similar way we have AUX engines. */
+
+ /* I2C SW engines, per DDC line.
+ * Only lines with HW DDC support will be initialized */
+ struct i2c_engine *i2c_sw_engines[GPIO_DDC_LINE_COUNT];
+
+ /* I2C HW engines, per DDC line.
+ * Only lines with HW DDC support will be initialized */
+ struct i2c_engine *i2c_hw_engines[GPIO_DDC_LINE_COUNT];
+
+ /* AUX engines, per DDC line.
+ * Only lines with HW AUX support will be initialized */
+ struct aux_engine *aux_engines[GPIO_DDC_LINE_COUNT];
+
+ /* For all other lines, we can use
+ * a single instance of the generic I2C HW engine
+ * (since in HW, there is a single instance of it)
+ * or a single instance of the generic I2C SW engine.
+ * AUX is not supported for other lines. */
+
+ /* General-purpose I2C SW engine.
+ * Can be assigned dynamically to any line per transaction */
+ struct i2c_engine *i2c_generic_sw_engine;
+
+ /* General-purpose I2C generic HW engine.
+ * Can be assigned dynamically to almost any line per transaction */
+ struct i2c_engine *i2c_generic_hw_engine;
+
+ /* [anaumov] in DAL2, there is a Mutex */
+
+ uint32_t aux_timeout_period;
+
+ /* expressed in KHz */
+ uint32_t default_i2c_sw_speed;
+ uint32_t default_i2c_hw_speed;
+};
+
+void dal_i2caux_construct(
+ struct i2caux *i2caux,
+ struct dc_context *ctx);
+
+void dal_i2caux_release_engine(
+ struct i2caux *i2caux,
+ struct engine *engine);
+
+void dal_i2caux_destruct(
+ struct i2caux *i2caux);
+
+void dal_i2caux_destroy(
+ struct i2caux **ptr);
+
+struct i2c_engine *dal_i2caux_acquire_i2c_sw_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+
+struct aux_engine *dal_i2caux_acquire_aux_engine(
+ struct i2caux *i2caux,
+ struct ddc *ddc);
+
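+/* Minimal usage sketch (illustrative only; assumes 'my_i2caux', 'ctx' and
+ * 'ddc_pin' are valid objects provided by the caller):
+ *
+ *	struct aux_engine *engine;
+ *
+ *	dal_i2caux_construct(my_i2caux, ctx);
+ *
+ *	engine = dal_i2caux_acquire_aux_engine(my_i2caux, ddc_pin);
+ *	if (engine) {
+ *		// submit i2caux_transaction_request(s) via engine->base here
+ *		dal_i2caux_release_engine(my_i2caux, &engine->base);
+ *	}
+ *
+ *	dal_i2caux_destruct(my_i2caux);
+ */
+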
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
new file mode 100644
index 000000000000..39ee8eba3c31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef BW_FIXED_H_
+#define BW_FIXED_H_
+
+#define BW_FIXED_BITS_PER_FRACTIONAL_PART 24
+
+#define BW_FIXED_GET_INTEGER_PART(x) ((x) >> BW_FIXED_BITS_PER_FRACTIONAL_PART)
+struct bw_fixed {
+ int64_t value;
+};
+
+#define BW_FIXED_MIN_I32 \
+ (int64_t)(-(1LL << (63 - BW_FIXED_BITS_PER_FRACTIONAL_PART)))
+
+#define BW_FIXED_MAX_I32 \
+ (int64_t)((1ULL << (63 - BW_FIXED_BITS_PER_FRACTIONAL_PART)) - 1)
+
+static inline struct bw_fixed bw_min2(const struct bw_fixed arg1,
+ const struct bw_fixed arg2)
+{
+ return (arg1.value <= arg2.value) ? arg1 : arg2;
+}
+
+static inline struct bw_fixed bw_max2(const struct bw_fixed arg1,
+ const struct bw_fixed arg2)
+{
+ return (arg2.value <= arg1.value) ? arg1 : arg2;
+}
+
+static inline struct bw_fixed bw_min3(struct bw_fixed v1,
+ struct bw_fixed v2,
+ struct bw_fixed v3)
+{
+ return bw_min2(bw_min2(v1, v2), v3);
+}
+
+static inline struct bw_fixed bw_max3(struct bw_fixed v1,
+ struct bw_fixed v2,
+ struct bw_fixed v3)
+{
+ return bw_max2(bw_max2(v1, v2), v3);
+}
+
+struct bw_fixed bw_int_to_fixed_nonconst(int64_t value);
+static inline struct bw_fixed bw_int_to_fixed(int64_t value)
+{
+ if (__builtin_constant_p(value)) {
+ struct bw_fixed res;
+ BUILD_BUG_ON(value > BW_FIXED_MAX_I32 || value < BW_FIXED_MIN_I32);
+ res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART;
+ return res;
+ } else
+ return bw_int_to_fixed_nonconst(value);
+}
+
+static inline int32_t bw_fixed_to_int(struct bw_fixed value)
+{
+ return BW_FIXED_GET_INTEGER_PART(value.value);
+}
+
+struct bw_fixed bw_frc_to_fixed(int64_t num, int64_t denum);
+
+static inline struct bw_fixed fixed31_32_to_bw_fixed(int64_t raw)
+{
+ struct bw_fixed result = { 0 };
+
+ if (raw < 0) {
+ raw = -raw;
+ result.value = -(raw >> (32 - BW_FIXED_BITS_PER_FRACTIONAL_PART));
+ } else {
+ result.value = raw >> (32 - BW_FIXED_BITS_PER_FRACTIONAL_PART);
+ }
+
+ return result;
+}
+
+static inline struct bw_fixed bw_add(const struct bw_fixed arg1,
+ const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+static inline struct bw_fixed bw_sub(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2);
+static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return bw_frc_to_fixed(arg1.value, arg2.value);
+}
+
+static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ struct bw_fixed res;
+ div64_u64_rem(arg1.value, arg2.value, &res.value);
+ return res;
+}
+
+struct bw_fixed bw_floor2(const struct bw_fixed arg, const struct bw_fixed significance);
+struct bw_fixed bw_ceil2(const struct bw_fixed arg, const struct bw_fixed significance);
+
+static inline bool bw_equ(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+static inline bool bw_neq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value != arg2.value;
+}
+
+static inline bool bw_leq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+static inline bool bw_meq(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value >= arg2.value;
+}
+
+static inline bool bw_ltn(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+static inline bool bw_mtn(const struct bw_fixed arg1, const struct bw_fixed arg2)
+{
+ return arg1.value > arg2.value;
+}
+
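+/* Minimal usage sketch of the fixed-point helpers above (illustrative only):
+ *
+ *	struct bw_fixed three_halves = bw_frc_to_fixed(3, 2);	// 1.5
+ *	struct bw_fixed two = bw_int_to_fixed(2);		// 2.0
+ *	struct bw_fixed sum = bw_add(three_halves, two);	// 3.5
+ *
+ *	bw_fixed_to_int(sum);		// 3 (fractional bits shifted out)
+ *	bw_ltn(three_halves, two);	// true, 1.5 < 2.0
+ *
+ * With 24 fractional bits, bw_int_to_fixed(1).value == 1 << 24.
+ */
+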
+#endif //BW_FIXED_H_
diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
new file mode 100644
index 000000000000..ebcf67b5fc57
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_CLOCK_SOURCE_H__
+#define __DC_CLOCK_SOURCE_H__
+
+#include "dc_types.h"
+#include "include/grph_object_id.h"
+#include "include/bios_parser_types.h"
+
+struct clock_source;
+
+struct spread_spectrum_data {
+ uint32_t percentage; /*> In units of 0.01% or 0.001% */
+ uint32_t percentage_divider; /*> 100 or 1000 */
+ uint32_t freq_range_khz;
+ uint32_t modulation_freq_hz;
+
+ struct spread_spectrum_flags flags;
+};
+
+struct delta_sigma_data {
+ uint32_t feedback_amount;
+ uint32_t nfrac_amount;
+ uint32_t ds_frac_size;
+ uint32_t ds_frac_amount;
+};
+
+/**
+ * Pixel Clock Parameters structure
+ * These parameters are required as input
+ * when calculating Pixel Clock Dividers for requested Pixel Clock
+ */
+struct pixel_clk_flags {
+ uint32_t ENABLE_SS:1;
+ uint32_t DISPLAY_BLANKED:1;
+ uint32_t PROGRAM_PIXEL_CLOCK:1;
+ uint32_t PROGRAM_ID_CLOCK:1;
+ uint32_t SUPPORT_YCBCR420:1;
+};
+
+/**
+ * DisplayPort HW de-spread of reference clock related parameters structure.
+ * Store it once at boot for later use.
+ */
+struct csdp_ref_clk_ds_params {
+ bool hw_dso_n_dp_ref_clk;
+/* Flag for HW de-spread enabled (if enabled, SS is applied to the DP reference clock)*/
+ uint32_t avg_dp_ref_clk_khz;
+/* Average DP Reference clock (in KHz)*/
+ uint32_t ss_percentage_on_dp_ref_clk;
+/* DP Reference clock SS percentage
+ * (not to be mixed with DP IDCLK SS from PLL Settings)*/
+ uint32_t ss_percentage_divider;
+/* DP Reference clock SS percentage divider */
+};
+
+struct pixel_clk_params {
+ uint32_t requested_pix_clk; /* in KHz */
+/*> Requested Pixel Clock
+ * (based on Video Timing standard used for requested mode)*/
+ uint32_t requested_sym_clk; /* in KHz */
+/*> Requested Sym Clock (relevant only for display port)*/
+ uint32_t dp_ref_clk; /* in KHz */
+/*> DP reference clock - calculated only for DP signal for specific cases*/
+ struct graphics_object_id encoder_object_id;
+/*> Encoder object Id - needed by VBIOS Exec table*/
+ enum signal_type signal_type;
+/*> signalType -> Encoder Mode - needed by VBIOS Exec table*/
+ enum controller_id controller_id;
+/*> ControllerId - which controller using this PLL*/
+ enum dc_color_depth color_depth;
+ struct csdp_ref_clk_ds_params de_spread_params;
+/*> de-spread info, relevant only for on-the-fly tune-up pixel rate*/
+ enum dc_pixel_encoding pixel_encoding;
+ struct pixel_clk_flags flags;
+};
+
+/**
+ * Pixel Clock Dividers structure with desired Pixel Clock
+ * (adjusted after VBIOS exec table),
+ * with actually calculated Clock and reference Crystal frequency
+ */
+struct pll_settings {
+ uint32_t actual_pix_clk;
+ uint32_t adjusted_pix_clk;
+ uint32_t calculated_pix_clk;
+ uint32_t vco_freq;
+ uint32_t reference_freq;
+ uint32_t reference_divider;
+ uint32_t feedback_divider;
+ uint32_t fract_feedback_divider;
+ uint32_t pix_clk_post_divider;
+ uint32_t ss_percentage;
+ bool use_external_clk;
+};
+
+struct calc_pll_clock_source_init_data {
+ struct dc_bios *bp;
+ uint32_t min_pix_clk_pll_post_divider;
+ uint32_t max_pix_clk_pll_post_divider;
+ uint32_t min_pll_ref_divider;
+ uint32_t max_pll_ref_divider;
+ uint32_t min_override_input_pxl_clk_pll_freq_khz;
+/* if not 0, override the firmware info */
+
+ uint32_t max_override_input_pxl_clk_pll_freq_khz;
+/* if not 0, override the firmware info */
+
+ uint32_t num_fract_fb_divider_decimal_point;
+/* number of decimal point for fractional feedback divider value */
+
+ uint32_t num_fract_fb_divider_decimal_point_precision;
+/* number of decimal point to round off for fractional feedback divider value*/
+ struct dc_context *ctx;
+
+};
+
+struct calc_pll_clock_source {
+ uint32_t ref_freq_khz;
+ uint32_t min_pix_clock_pll_post_divider;
+ uint32_t max_pix_clock_pll_post_divider;
+ uint32_t min_pll_ref_divider;
+ uint32_t max_pll_ref_divider;
+
+ uint32_t max_vco_khz;
+ uint32_t min_vco_khz;
+ uint32_t min_pll_input_freq_khz;
+ uint32_t max_pll_input_freq_khz;
+
+ uint32_t fract_fb_divider_decimal_points_num;
+ uint32_t fract_fb_divider_factor;
+ uint32_t fract_fb_divider_precision;
+ uint32_t fract_fb_divider_precision_factor;
+ struct dc_context *ctx;
+};
+
+struct clock_source_funcs {
+ bool (*cs_power_down)(
+ struct clock_source *);
+ bool (*program_pix_clk)(struct clock_source *,
+ struct pixel_clk_params *, struct pll_settings *);
+ uint32_t (*get_pix_clk_dividers)(
+ struct clock_source *,
+ struct pixel_clk_params *,
+ struct pll_settings *);
+ uint32_t (*get_pix_rate_in_hz)(
+ struct clock_source *,
+ struct pixel_clk_params *,
+ struct pll_settings *);
+};
+
+struct clock_source {
+ const struct clock_source_funcs *funcs;
+ struct dc_context *ctx;
+ enum clock_source_id id;
+ bool dp_clk_src;
+};
+
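+/* Minimal usage sketch (illustrative only; 'cs' is a constructed clock
+ * source, and a real caller fills in more of pixel_clk_params than shown):
+ *
+ *	struct pixel_clk_params params = { 0 };
+ *	struct pll_settings pll = { 0 };
+ *
+ *	params.requested_pix_clk = 148500;	// in KHz, e.g. 1080p60
+ *	params.signal_type = SIGNAL_TYPE_HDMI_TYPE_A;
+ *
+ *	cs->funcs->get_pix_clk_dividers(cs, &params, &pll);
+ *	cs->funcs->program_pix_clk(cs, &params, &pll);
+ */
+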
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
new file mode 100644
index 000000000000..bcb18f5e1e60
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_COMPRESSOR_H__
+#define __DAL_COMPRESSOR_H__
+
+#include "include/grph_object_id.h"
+#include "bios_parser_interface.h"
+
+enum fbc_compress_ratio {
+ FBC_COMPRESS_RATIO_INVALID = 0,
+ FBC_COMPRESS_RATIO_1TO1 = 1,
+ FBC_COMPRESS_RATIO_2TO1 = 2,
+ FBC_COMPRESS_RATIO_4TO1 = 4,
+ FBC_COMPRESS_RATIO_8TO1 = 8,
+};
+
+union fbc_physical_address {
+ struct {
+ uint32_t low_part;
+ int32_t high_part;
+ } addr;
+ uint64_t quad_part;
+};
+
+struct compr_addr_and_pitch_params {
+ /* enum controller_id controller_id; */
+ uint32_t inst;
+ uint32_t source_view_width;
+ uint32_t source_view_height;
+};
+
+enum fbc_hw_max_resolution_supported {
+ FBC_MAX_X = 3840,
+ FBC_MAX_Y = 2400,
+ FBC_MAX_X_SG = 1920,
+ FBC_MAX_Y_SG = 1080,
+};
+
+struct compressor;
+
+struct compressor_funcs {
+
+ void (*power_up_fbc)(struct compressor *cp);
+ void (*enable_fbc)(struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+ void (*disable_fbc)(struct compressor *cp);
+ void (*set_fbc_invalidation_triggers)(struct compressor *cp,
+ uint32_t fbc_trigger);
+ void (*surface_address_and_pitch)(
+ struct compressor *cp,
+ struct compr_addr_and_pitch_params *params);
+ bool (*is_fbc_enabled_in_hw)(struct compressor *cp,
+ uint32_t *fbc_mapped_crtc_id);
+};
+struct compressor {
+ struct dc_context *ctx;
+ uint32_t attached_inst;
+ bool is_enabled;
+ const struct compressor_funcs *funcs;
+ union {
+ uint32_t raw;
+ struct {
+ uint32_t FBC_SUPPORT:1;
+ uint32_t FB_POOL:1;
+ uint32_t DYNAMIC_ALLOC:1;
+ uint32_t LPT_SUPPORT:1;
+ uint32_t LPT_MC_CONFIG:1;
+ uint32_t DUMMY_BACKEND:1;
+ uint32_t CLK_GATING_DISABLED:1;
+
+ } bits;
+ } options;
+
+ union fbc_physical_address compr_surface_address;
+
+ uint32_t embedded_panel_h_size;
+ uint32_t embedded_panel_v_size;
+ uint32_t memory_bus_width;
+ uint32_t banks_num;
+ uint32_t raw_size;
+ uint32_t channel_interleave_size;
+ uint32_t dram_channels_num;
+
+ uint32_t allocated_size;
+ uint32_t preferred_requested_size;
+ uint32_t lpt_channels_num;
+ enum fbc_compress_ratio min_compress_ratio;
+};
+
+struct fbc_input_info {
+ bool dynamic_fbc_buffer_alloc;
+ unsigned int source_view_width;
+ unsigned int source_view_height;
+ unsigned int num_of_active_targets;
+};
+
+
+struct fbc_requested_compressed_size {
+ unsigned int preferred_size;
+ unsigned int preferred_size_alignment;
+ unsigned int min_size;
+ unsigned int min_size_alignment;
+ union {
+ struct {
+ /* The preferred_size above must be allocated in the FB pool */
+ unsigned int preferred_must_be_framebuffer_pool : 1;
+ /* The min_size above must be allocated in the FB pool */
+ unsigned int min_must_be_framebuffer_pool : 1;
+ } bits;
+ unsigned int flags;
+ };
+};
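+
+/* Minimal usage sketch (illustrative only; 'cp' is a constructed compressor
+ * and the view size comes from the active timing):
+ *
+ *	struct compr_addr_and_pitch_params params = { 0 };
+ *
+ *	params.inst = 0;
+ *	params.source_view_width = 1920;
+ *	params.source_view_height = 1080;
+ *
+ *	cp->funcs->power_up_fbc(cp);
+ *	cp->funcs->enable_fbc(cp, &params);
+ *	...
+ *	cp->funcs->disable_fbc(cp);
+ */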
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
new file mode 100644
index 000000000000..94fc31080fda
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _CORE_STATUS_H_
+#define _CORE_STATUS_H_
+
+enum dc_status {
+ DC_OK = 1,
+
+ DC_NO_CONTROLLER_RESOURCE = 2,
+ DC_NO_STREAM_ENG_RESOURCE = 3,
+ DC_NO_CLOCK_SOURCE_RESOURCE = 4,
+ DC_FAIL_CONTROLLER_VALIDATE = 5,
+ DC_FAIL_ENC_VALIDATE = 6,
+ DC_FAIL_ATTACH_SURFACES = 7,
+ DC_FAIL_DETACH_SURFACES = 8,
+ DC_FAIL_SURFACE_VALIDATE = 9,
+ DC_NO_DP_LINK_BANDWIDTH = 10,
+ DC_EXCEED_DONGLE_CAP = 11,
+ DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12,
+ DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
+ DC_FAIL_SCALING = 14,
+ DC_FAIL_DP_LINK_TRAINING = 15,
+
+ DC_ERROR_UNEXPECTED = -1
+};
+
+#endif /* _CORE_STATUS_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
new file mode 100644
index 000000000000..b69f321e2ab6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _CORE_TYPES_H_
+#define _CORE_TYPES_H_
+
+#include "dc.h"
+#include "dce_calcs.h"
+#include "dcn_calcs.h"
+#include "ddc_service_types.h"
+#include "dc_bios_types.h"
+#include "mem_input.h"
+#include "hubp.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "mpc.h"
+#endif
+
+#define MAX_CLOCK_SOURCES 7
+
+void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
+ uint32_t controller_id);
+
+#include "grph_object_id.h"
+#include "link_encoder.h"
+#include "stream_encoder.h"
+#include "clock_source.h"
+#include "audio.h"
+#include "dm_pp_smu.h"
+
+
+/************ link *****************/
+struct link_init_data {
+ const struct dc *dc;
+ struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */
+ uint32_t connector_index; /* this will be mapped to the HPD pins */
+ uint32_t link_index; /* this is mapped to DAL display_index
+ TODO: remove it when DC is complete. */
+};
+
+enum {
+ FREE_ACQUIRED_RESOURCE = 0,
+ KEEP_ACQUIRED_RESOURCE = 1,
+};
+
+struct dc_link *link_create(const struct link_init_data *init_params);
+void link_destroy(struct dc_link **link);
+
+enum dc_status dc_link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+
+void core_link_resume(struct dc_link *link);
+
+void core_link_enable_stream(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
+
+void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
+
+void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
+/********** DAL Core*********************/
+#include "display_clock.h"
+#include "transform.h"
+#include "dpp.h"
+
+struct resource_pool;
+struct dc_state;
+struct resource_context;
+
+struct resource_funcs {
+ void (*destroy)(struct resource_pool **pool);
+ struct link_encoder *(*link_enc_create)(
+ const struct encoder_init_data *init);
+
+ enum dc_status (*validate_guaranteed)(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context);
+
+ bool (*validate_bandwidth)(
+ struct dc *dc,
+ struct dc_state *context);
+
+ enum dc_status (*validate_global)(
+ struct dc *dc,
+ struct dc_state *context);
+
+ struct pipe_ctx *(*acquire_idle_pipe_for_layer)(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream);
+
+ enum dc_status (*validate_plane)(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+
+ enum dc_status (*add_stream_to_ctx)(
+ struct dc *dc,
+ struct dc_state *new_ctx,
+ struct dc_stream_state *dc_stream);
+};
+
+struct audio_support {
+ bool dp_audio;
+ bool hdmi_audio_on_dongle;
+ bool hdmi_audio_native;
+};
+
+#define NO_UNDERLAY_PIPE -1
+
+struct resource_pool {
+ struct mem_input *mis[MAX_PIPES];
+ struct hubp *hubps[MAX_PIPES];
+ struct input_pixel_processor *ipps[MAX_PIPES];
+ struct transform *transforms[MAX_PIPES];
+ struct dpp *dpps[MAX_PIPES];
+ struct output_pixel_processor *opps[MAX_PIPES];
+ struct timing_generator *timing_generators[MAX_PIPES];
+ struct stream_encoder *stream_enc[MAX_PIPES * 2];
+
+ struct mpc *mpc;
+ struct pp_smu_funcs_rv *pp_smu;
+ struct pp_smu_display_requirement_rv pp_smu_req;
+
+ unsigned int pipe_count;
+ unsigned int underlay_pipe_index;
+ unsigned int stream_enc_count;
+ unsigned int ref_clock_inKhz;
+
+ /*
+ * reserved clock source for DP
+ */
+ struct clock_source *dp_clock_source;
+
+ struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
+ unsigned int clk_src_count;
+
+ struct audio *audios[MAX_PIPES];
+ unsigned int audio_count;
+ struct audio_support audio_support;
+
+ struct display_clock *display_clock;
+ struct irq_service *irqs;
+
+ struct abm *abm;
+ struct dmcu *dmcu;
+
+ const struct resource_funcs *funcs;
+ const struct resource_caps *res_cap;
+};
+
+struct stream_resource {
+ struct output_pixel_processor *opp;
+ struct timing_generator *tg;
+ struct stream_encoder *stream_enc;
+ struct audio *audio;
+
+ struct pixel_clk_params pix_clk_params;
+ struct encoder_info_frame encoder_info_frame;
+};
+
+struct plane_resource {
+ struct scaler_data scl_data;
+ struct hubp *hubp;
+ struct mem_input *mi;
+ struct input_pixel_processor *ipp;
+ struct transform *xfm;
+ struct dpp *dpp;
+};
+
+struct pipe_ctx {
+ struct dc_plane_state *plane_state;
+ struct dc_stream_state *stream;
+
+ struct plane_resource plane_res;
+ struct stream_resource stream_res;
+
+ struct clock_source *clock_source;
+
+ struct pll_settings pll_settings;
+
+ uint8_t pipe_idx;
+
+ struct pipe_ctx *top_pipe;
+ struct pipe_ctx *bottom_pipe;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct _vcs_dpi_display_dlg_regs_st dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
+#endif
+ struct dwbc *dwbc;
+};
+
+struct resource_context {
+ struct pipe_ctx pipe_ctx[MAX_PIPES];
+ bool is_stream_enc_acquired[MAX_PIPES * 2];
+ bool is_audio_acquired[MAX_PIPES];
+ uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
+ uint8_t dp_clock_source_ref_count;
+};
+
+struct dce_bw_output {
+ bool cpuc_state_change_enable;
+ bool cpup_state_change_enable;
+ bool stutter_mode_enable;
+ bool nbp_state_change_enable;
+ bool all_displays_in_sync;
+ struct dce_watermarks urgent_wm_ns[MAX_PIPES];
+ struct dce_watermarks stutter_exit_wm_ns[MAX_PIPES];
+ struct dce_watermarks nbp_state_change_wm_ns[MAX_PIPES];
+ int sclk_khz;
+ int sclk_deep_sleep_khz;
+ int yclk_khz;
+ int dispclk_khz;
+ int blackout_recovery_time_us;
+};
+
+struct dcn_bw_clocks {
+ int dispclk_khz;
+ bool dppclk_div;
+ int dcfclk_khz;
+ int dcfclk_deep_sleep_khz;
+ int fclk_khz;
+ int dram_ccm_us;
+ int min_active_dram_ccm_us;
+};
+
+struct dcn_bw_output {
+ struct dcn_bw_clocks cur_clk;
+ struct dcn_bw_clocks calc_clk;
+ struct dcn_watermark_set watermarks;
+};
+
+union bw_context {
+ struct dcn_bw_output dcn;
+ struct dce_bw_output dce;
+};
+
+struct dc_state {
+ struct dc_stream_state *streams[MAX_PIPES];
+ struct dc_stream_status stream_status[MAX_PIPES];
+ uint8_t stream_count;
+
+ struct resource_context res_ctx;
+
+ /* The output from BW and WM calculations. */
+ union bw_context bw;
+
+ /* Note: these are big structures, do *not* put on stack! */
+ struct dm_pp_display_configuration pp_display_cfg;
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ struct dcn_bw_internal_vars dcn_bw_vars;
+#endif
+
+ struct display_clock *dis_clk;
+
+ struct kref refcount;
+};
+
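+/* Minimal sketch of walking the per-pipe context in a dc_state
+ * (illustrative only; 'context' is a valid struct dc_state):
+ *
+ *	int i;
+ *
+ *	for (i = 0; i < MAX_PIPES; i++) {
+ *		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ *
+ *		if (!pipe->stream)
+ *			continue;
+ *
+ *		// inspect pipe->plane_state, pipe->plane_res, pipe->stream_res ...
+ *	}
+ */
+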
+#endif /* _CORE_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/custom_float.h b/drivers/gpu/drm/amd/display/dc/inc/custom_float.h
new file mode 100644
index 000000000000..f57239672216
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/custom_float.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef CUSTOM_FLOAT_H_
+#define CUSTOM_FLOAT_H_
+
+#include "bw_fixed.h"
+#include "hw_shared.h"
+#include "opp.h"
+
+
+bool convert_to_custom_float_format(
+ struct fixed31_32 value,
+ const struct custom_float_format *format,
+ uint32_t *result);
+
+
+#endif //CUSTOM_FLOAT_H_
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
new file mode 100644
index 000000000000..0bf73b742f1f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DDC_SERVICE_H__
+#define __DAL_DDC_SERVICE_H__
+
+#include "include/ddc_service_types.h"
+#include "include/i2caux_interface.h"
+
+#define EDID_SEGMENT_SIZE 256
+
+/* Address range from 0x00 to 0x1F.*/
+#define DP_ADAPTOR_TYPE2_SIZE 0x20
+#define DP_ADAPTOR_TYPE2_REG_ID 0x10
+#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
+/* Identifies adaptor as Dual-mode adaptor */
+#define DP_ADAPTOR_TYPE2_ID 0xA0
+/* MHz */
+#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
+/* MHz */
+#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
+/* kHz */
+#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
+/* kHz */
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+
+#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
+
+struct ddc_service;
+struct graphics_object_id;
+enum ddc_result;
+struct av_sync_data;
+struct dp_receiver_id_info;
+
+struct i2c_payloads;
+struct aux_payloads;
+
+void dal_ddc_i2c_payloads_add(
+ struct i2c_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write);
+
+void dal_ddc_aux_payloads_add(
+ struct aux_payloads *payloads,
+ uint32_t address,
+ uint32_t len,
+ uint8_t *data,
+ bool write);
+
+struct ddc_service_init_data {
+ struct graphics_object_id id;
+ struct dc_context *ctx;
+ struct dc_link *link;
+};
+
+struct ddc_service *dal_ddc_service_create(
+ struct ddc_service_init_data *ddc_init_data);
+
+void dal_ddc_service_destroy(struct ddc_service **ddc);
+
+enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc);
+
+void dal_ddc_service_set_transaction_type(
+ struct ddc_service *ddc,
+ enum ddc_transaction_type type);
+
+bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc);
+
+void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap);
+
+bool dal_ddc_service_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+enum ddc_result dal_ddc_service_read_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t len);
+
+enum ddc_result dal_ddc_service_write_dpcd_data(
+ struct ddc_service *ddc,
+ bool i2c,
+ enum i2c_mot_mode mot,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t len);
+
+void dal_ddc_service_write_scdc_data(
+ struct ddc_service *ddc_service,
+ uint32_t pix_clk,
+ bool lte_340_scramble);
+
+void dal_ddc_service_read_scdc_data(
+ struct ddc_service *ddc_service);
+
+void ddc_service_set_dongle_type(struct ddc_service *ddc,
+ enum display_dongle_type dongle_type);
+
+void dal_ddc_service_set_ddc_pin(
+ struct ddc_service *ddc_service,
+ struct ddc *ddc);
+
+struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service);
+
+uint32_t get_defer_delay(struct ddc_service *ddc);
+
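+/* Minimal usage sketch (illustrative only; 'ddc' is a created ddc_service).
+ * Read 128 bytes from a hypothetical I2C slave at 7-bit address 0x50,
+ * starting at register offset 0:
+ *
+ *	uint8_t offset = 0;
+ *	uint8_t buf[128];
+ *
+ *	if (dal_ddc_service_query_ddc_data(ddc, 0x50, &offset, sizeof(offset),
+ *					   buf, sizeof(buf)))
+ *		// buf now holds the data read back
+ */
+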
+#endif /* __DAL_DDC_SERVICE_H__ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
new file mode 100644
index 000000000000..616c73e2b0bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DP_H__
+#define __DC_LINK_DP_H__
+
+#define LINK_TRAINING_ATTEMPTS 4
+#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+
+struct dc_link;
+struct dc_stream_state;
+struct dc_link_settings;
+
+bool dp_hbr_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting);
+
+bool dp_validate_mode_timing(
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+
+void decide_link_settings(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+
+bool perform_link_training_with_retries(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts);
+
+bool is_mst_supported(struct dc_link *link);
+
+void detect_dp_sink_caps(struct dc_link *link);
+
+void detect_edp_sink_caps(struct dc_link *link);
+
+bool is_dp_active_dongle(const struct dc_link *link);
+
+void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
+
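+/* Minimal usage sketch (illustrative only; 'link' and 'stream' are valid
+ * objects provided by the caller):
+ *
+ *	struct dc_link_settings link_setting;
+ *
+ *	decide_link_settings(stream, &link_setting);
+ *
+ *	if (!perform_link_training_with_retries(link, &link_setting,
+ *						false, LINK_TRAINING_ATTEMPTS))
+ *		// link training failed after all attempts
+ */
+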
+#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
new file mode 100644
index 000000000000..ae2399f16d1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h
@@ -0,0 +1,481 @@
+/*
+ * Copyright 2015-2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * Bandwidth and Watermark calculations interface.
+ * (Refer to "DCEx_mode_support.xlsm" from Perforce.)
+ */
+#ifndef __DCE_CALCS_H__
+#define __DCE_CALCS_H__
+
+#include "bw_fixed.h"
+
+struct pipe_ctx;
+struct dc;
+struct dc_state;
+struct dce_bw_output;
+
+enum bw_calcs_version {
+ BW_CALCS_VERSION_INVALID,
+ BW_CALCS_VERSION_CARRIZO,
+ BW_CALCS_VERSION_POLARIS10,
+ BW_CALCS_VERSION_POLARIS11,
+ BW_CALCS_VERSION_STONEY,
+ BW_CALCS_VERSION_VEGA10
+};
+
+/*******************************************************************************
+ * There are three types of input into Calculations:
+ * 1. per-DCE static values - these are "hardcoded" properties of the DCEIP
+ * 2. board-level values - these are generally coming from VBIOS parser
+ * 3. mode/configuration values - depending on mode, scaling, number of displays, etc.
+ ******************************************************************************/
+
+enum bw_defines {
+ //Common
+ bw_def_no = 0,
+ bw_def_none = 0,
+ bw_def_yes = 1,
+ bw_def_ok = 1,
+ bw_def_high = 2,
+ bw_def_mid = 1,
+ bw_def_low = 0,
+
+ //Internal
+ bw_defs_start = 255,
+ bw_def_underlay422,
+ bw_def_underlay420_luma,
+ bw_def_underlay420_chroma,
+ bw_def_underlay444,
+ bw_def_graphics,
+ bw_def_display_write_back420_luma,
+ bw_def_display_write_back420_chroma,
+ bw_def_portrait,
+ bw_def_hsr_mtn_4,
+ bw_def_hsr_mtn_h_taps,
+ bw_def_ceiling__h_taps_div_4___meq_hsr,
+ bw_def_invalid_linear_or_stereo_mode,
+ bw_def_invalid_rotation_or_bpp_or_stereo,
+ bw_def_vsr_mtn_v_taps,
+ bw_def_vsr_mtn_4,
+ bw_def_auto,
+ bw_def_manual,
+ bw_def_exceeded_allowed_maximum_sclk,
+ bw_def_exceeded_allowed_page_close_open,
+ bw_def_exceeded_allowed_outstanding_pte_req_queue_size,
+ bw_def_exceeded_allowed_maximum_bw,
+ bw_def_landscape,
+
+ //Panning and bezel
+ bw_def_any_lines,
+
+ //Underlay mode
+ bw_def_underlay_only,
+ bw_def_blended,
+ bw_def_blend,
+
+ //Stereo mode
+ bw_def_mono,
+ bw_def_side_by_side,
+ bw_def_top_bottom,
+
+ //Underlay surface type
+ bw_def_420,
+ bw_def_422,
+ bw_def_444,
+
+ //Tiling mode
+ bw_def_linear,
+ bw_def_tiled,
+ bw_def_array_linear_general,
+ bw_def_array_linear_aligned,
+ bw_def_rotated_micro_tiling,
+ bw_def_display_micro_tiling,
+
+ //Memory type
+ bw_def_gddr5,
+ bw_def_hbm,
+
+ //Voltage
+ bw_def_high_no_nbp_state_change,
+ bw_def_0_72,
+ bw_def_0_8,
+ bw_def_0_9,
+
+ bw_def_notok = -1,
+ bw_def_na = -1
+};
+
+struct bw_calcs_dceip {
+ enum bw_calcs_version version;
+ bool large_cursor;
+ uint32_t cursor_max_outstanding_group_num;
+ bool dmif_pipe_en_fbc_chunk_tracker;
+ struct bw_fixed dmif_request_buffer_size;
+ uint32_t lines_interleaved_into_lb;
+ uint32_t low_power_tiling_mode;
+ uint32_t chunk_width;
+ uint32_t number_of_graphics_pipes;
+ uint32_t number_of_underlay_pipes;
+ bool display_write_back_supported;
+ bool argb_compression_support;
+ struct bw_fixed underlay_vscaler_efficiency6_bit_per_component;
+ struct bw_fixed underlay_vscaler_efficiency8_bit_per_component;
+ struct bw_fixed underlay_vscaler_efficiency10_bit_per_component;
+ struct bw_fixed underlay_vscaler_efficiency12_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency6_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency8_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency10_bit_per_component;
+ struct bw_fixed graphics_vscaler_efficiency12_bit_per_component;
+ struct bw_fixed alpha_vscaler_efficiency;
+ uint32_t max_dmif_buffer_allocated;
+ uint32_t graphics_dmif_size;
+ uint32_t underlay_luma_dmif_size;
+ uint32_t underlay_chroma_dmif_size;
+ bool pre_downscaler_enabled;
+ bool underlay_downscale_prefetch_enabled;
+ struct bw_fixed lb_write_pixels_per_dispclk;
+ struct bw_fixed lb_size_per_component444;
+ bool graphics_lb_nodownscaling_multi_line_prefetching;
+ struct bw_fixed stutter_and_dram_clock_state_change_gated_before_cursor;
+ struct bw_fixed underlay420_luma_lb_size_per_component;
+ struct bw_fixed underlay420_chroma_lb_size_per_component;
+ struct bw_fixed underlay422_lb_size_per_component;
+ struct bw_fixed cursor_chunk_width;
+ struct bw_fixed cursor_dcp_buffer_lines;
+ struct bw_fixed underlay_maximum_width_efficient_for_tiling;
+ struct bw_fixed underlay_maximum_height_efficient_for_tiling;
+ struct bw_fixed peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display;
+ struct bw_fixed peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation;
+ struct bw_fixed minimum_outstanding_pte_request_limit;
+ struct bw_fixed maximum_total_outstanding_pte_requests_allowed_by_saw;
+ bool limit_excessive_outstanding_dmif_requests;
+ struct bw_fixed linear_mode_line_request_alternation_slice;
+ uint32_t scatter_gather_lines_of_pte_prefetching_in_linear_mode;
+ uint32_t display_write_back420_luma_mcifwr_buffer_size;
+ uint32_t display_write_back420_chroma_mcifwr_buffer_size;
+ struct bw_fixed request_efficiency;
+ struct bw_fixed dispclk_per_request;
+ struct bw_fixed dispclk_ramping_factor;
+ struct bw_fixed display_pipe_throughput_factor;
+ uint32_t scatter_gather_pte_request_rows_in_tiling_mode;
+ struct bw_fixed mcifwr_all_surfaces_burst_time;
+};
+
+struct bw_calcs_vbios {
+ enum bw_defines memory_type;
+ uint32_t dram_channel_width_in_bits;
+ uint32_t number_of_dram_channels;
+ uint32_t number_of_dram_banks;
+ struct bw_fixed low_yclk; /*m_hz*/
+ struct bw_fixed mid_yclk; /*m_hz*/
+ struct bw_fixed high_yclk; /*m_hz*/
+ struct bw_fixed low_sclk; /*m_hz*/
+ struct bw_fixed mid1_sclk; /*m_hz*/
+ struct bw_fixed mid2_sclk; /*m_hz*/
+ struct bw_fixed mid3_sclk; /*m_hz*/
+ struct bw_fixed mid4_sclk; /*m_hz*/
+ struct bw_fixed mid5_sclk; /*m_hz*/
+ struct bw_fixed mid6_sclk; /*m_hz*/
+ struct bw_fixed high_sclk; /*m_hz*/
+ struct bw_fixed low_voltage_max_dispclk; /*m_hz*/
+ struct bw_fixed mid_voltage_max_dispclk; /*m_hz*/
+ struct bw_fixed high_voltage_max_dispclk; /*m_hz*/
+ struct bw_fixed low_voltage_max_phyclk;
+ struct bw_fixed mid_voltage_max_phyclk;
+ struct bw_fixed high_voltage_max_phyclk;
+ struct bw_fixed data_return_bus_width;
+ struct bw_fixed trc;
+ struct bw_fixed dmifmc_urgent_latency;
+ struct bw_fixed stutter_self_refresh_exit_latency;
+ struct bw_fixed stutter_self_refresh_entry_latency;
+ struct bw_fixed nbp_state_change_latency;
+ struct bw_fixed mcifwrmc_urgent_latency;
+ bool scatter_gather_enable;
+ struct bw_fixed down_spread_percentage;
+ uint32_t cursor_width;
+ uint32_t average_compression_rate;
+ uint32_t number_of_request_slots_gmc_reserves_for_dmif_per_channel;
+ struct bw_fixed blackout_duration;
+ struct bw_fixed maximum_blackout_recovery_time;
+};
+
+/*******************************************************************************
+ * Temporary data structure(s).
+ ******************************************************************************/
+#define maximum_number_of_surfaces 12
+/*Units : MHz, us */
+
+struct bw_calcs_data {
+ /* data for all displays */
+ uint32_t number_of_displays;
+ enum bw_defines underlay_surface_type;
+ enum bw_defines panning_and_bezel_adjustment;
+ enum bw_defines graphics_tiling_mode;
+ uint32_t graphics_lb_bpc;
+ uint32_t underlay_lb_bpc;
+ enum bw_defines underlay_tiling_mode;
+ enum bw_defines d0_underlay_mode;
+ bool d1_display_write_back_dwb_enable;
+ enum bw_defines d1_underlay_mode;
+
+ bool cpup_state_change_enable;
+ bool cpuc_state_change_enable;
+ bool nbp_state_change_enable;
+ bool stutter_mode_enable;
+ uint32_t y_clk_level;
+ uint32_t sclk_level;
+ uint32_t number_of_underlay_surfaces;
+ uint32_t number_of_dram_wrchannels;
+ uint32_t chunk_request_delay;
+ uint32_t number_of_dram_channels;
+ enum bw_defines underlay_micro_tile_mode;
+ enum bw_defines graphics_micro_tile_mode;
+ struct bw_fixed max_phyclk;
+ struct bw_fixed dram_efficiency;
+ struct bw_fixed src_width_after_surface_type;
+ struct bw_fixed src_height_after_surface_type;
+ struct bw_fixed hsr_after_surface_type;
+ struct bw_fixed vsr_after_surface_type;
+ struct bw_fixed src_width_after_rotation;
+ struct bw_fixed src_height_after_rotation;
+ struct bw_fixed hsr_after_rotation;
+ struct bw_fixed vsr_after_rotation;
+ struct bw_fixed source_height_pixels;
+ struct bw_fixed hsr_after_stereo;
+ struct bw_fixed vsr_after_stereo;
+ struct bw_fixed source_width_in_lb;
+ struct bw_fixed lb_line_pitch;
+ struct bw_fixed underlay_maximum_source_efficient_for_tiling;
+ struct bw_fixed num_lines_at_frame_start;
+ struct bw_fixed min_dmif_size_in_time;
+ struct bw_fixed min_mcifwr_size_in_time;
+ struct bw_fixed total_requests_for_dmif_size;
+ struct bw_fixed peak_pte_request_to_eviction_ratio_limiting;
+ struct bw_fixed useful_pte_per_pte_request;
+ struct bw_fixed scatter_gather_pte_request_rows;
+ struct bw_fixed scatter_gather_row_height;
+ struct bw_fixed scatter_gather_pte_requests_in_vblank;
+ struct bw_fixed inefficient_linear_pitch_in_bytes;
+ struct bw_fixed cursor_total_data;
+ struct bw_fixed cursor_total_request_groups;
+ struct bw_fixed scatter_gather_total_pte_requests;
+ struct bw_fixed scatter_gather_total_pte_request_groups;
+ struct bw_fixed tile_width_in_pixels;
+ struct bw_fixed dmif_total_number_of_data_request_page_close_open;
+ struct bw_fixed mcifwr_total_number_of_data_request_page_close_open;
+ struct bw_fixed bytes_per_page_close_open;
+ struct bw_fixed mcifwr_total_page_close_open_time;
+ struct bw_fixed total_requests_for_adjusted_dmif_size;
+ struct bw_fixed total_dmifmc_urgent_trips;
+ struct bw_fixed total_dmifmc_urgent_latency;
+ struct bw_fixed total_display_reads_required_data;
+ struct bw_fixed total_display_reads_required_dram_access_data;
+ struct bw_fixed total_display_writes_required_data;
+ struct bw_fixed total_display_writes_required_dram_access_data;
+ struct bw_fixed display_reads_required_data;
+ struct bw_fixed display_reads_required_dram_access_data;
+ struct bw_fixed dmif_total_page_close_open_time;
+ struct bw_fixed min_cursor_memory_interface_buffer_size_in_time;
+ struct bw_fixed min_read_buffer_size_in_time;
+ struct bw_fixed display_reads_time_for_data_transfer;
+ struct bw_fixed display_writes_time_for_data_transfer;
+ struct bw_fixed dmif_required_dram_bandwidth;
+ struct bw_fixed mcifwr_required_dram_bandwidth;
+ struct bw_fixed required_dmifmc_urgent_latency_for_page_close_open;
+ struct bw_fixed required_mcifmcwr_urgent_latency;
+ struct bw_fixed required_dram_bandwidth_gbyte_per_second;
+ struct bw_fixed dram_bandwidth;
+ struct bw_fixed dmif_required_sclk;
+ struct bw_fixed mcifwr_required_sclk;
+ struct bw_fixed required_sclk;
+ struct bw_fixed downspread_factor;
+ struct bw_fixed v_scaler_efficiency;
+ struct bw_fixed scaler_limits_factor;
+ struct bw_fixed display_pipe_pixel_throughput;
+ struct bw_fixed total_dispclk_required_with_ramping;
+ struct bw_fixed total_dispclk_required_without_ramping;
+ struct bw_fixed total_read_request_bandwidth;
+ struct bw_fixed total_write_request_bandwidth;
+ struct bw_fixed dispclk_required_for_total_read_request_bandwidth;
+ struct bw_fixed total_dispclk_required_with_ramping_with_request_bandwidth;
+ struct bw_fixed total_dispclk_required_without_ramping_with_request_bandwidth;
+ struct bw_fixed dispclk;
+ struct bw_fixed blackout_recovery_time;
+ struct bw_fixed min_pixels_per_data_fifo_entry;
+ struct bw_fixed sclk_deep_sleep;
+ struct bw_fixed chunk_request_time;
+ struct bw_fixed cursor_request_time;
+ struct bw_fixed line_source_pixels_transfer_time;
+ struct bw_fixed dmifdram_access_efficiency;
+ struct bw_fixed mcifwrdram_access_efficiency;
+ struct bw_fixed total_average_bandwidth_no_compression;
+ struct bw_fixed total_average_bandwidth;
+ struct bw_fixed total_stutter_cycle_duration;
+ struct bw_fixed stutter_burst_time;
+ struct bw_fixed time_in_self_refresh;
+ struct bw_fixed stutter_efficiency;
+ struct bw_fixed worst_number_of_trips_to_memory;
+ struct bw_fixed immediate_flip_time;
+ struct bw_fixed latency_for_non_dmif_clients;
+ struct bw_fixed latency_for_non_mcifwr_clients;
+ struct bw_fixed dmifmc_urgent_latency_supported_in_high_sclk_and_yclk;
+ struct bw_fixed nbp_state_dram_speed_change_margin;
+ struct bw_fixed display_reads_time_for_data_transfer_and_urgent_latency;
+ struct bw_fixed dram_speed_change_margin;
+ struct bw_fixed min_vblank_dram_speed_change_margin;
+ struct bw_fixed min_stutter_refresh_duration;
+ uint32_t total_stutter_dmif_buffer_size;
+ uint32_t total_bytes_requested;
+ uint32_t min_stutter_dmif_buffer_size;
+ uint32_t num_stutter_bursts;
+ struct bw_fixed v_blank_nbp_state_dram_speed_change_latency_supported;
+ struct bw_fixed nbp_state_dram_speed_change_latency_supported;
+ bool fbc_en[maximum_number_of_surfaces];
+ bool lpt_en[maximum_number_of_surfaces];
+ bool displays_match_flag[maximum_number_of_surfaces];
+ bool use_alpha[maximum_number_of_surfaces];
+ bool orthogonal_rotation[maximum_number_of_surfaces];
+ bool enable[maximum_number_of_surfaces];
+ bool access_one_channel_only[maximum_number_of_surfaces];
+ bool scatter_gather_enable_for_pipe[maximum_number_of_surfaces];
+ bool interlace_mode[maximum_number_of_surfaces];
+ bool display_pstate_change_enable[maximum_number_of_surfaces];
+ bool line_buffer_prefetch[maximum_number_of_surfaces];
+ uint32_t bytes_per_pixel[maximum_number_of_surfaces];
+ uint32_t max_chunks_non_fbc_mode[maximum_number_of_surfaces];
+ uint32_t lb_bpc[maximum_number_of_surfaces];
+ uint32_t output_bpphdmi[maximum_number_of_surfaces];
+ uint32_t output_bppdp4_lane_hbr[maximum_number_of_surfaces];
+ uint32_t output_bppdp4_lane_hbr2[maximum_number_of_surfaces];
+ uint32_t output_bppdp4_lane_hbr3[maximum_number_of_surfaces];
+ enum bw_defines stereo_mode[maximum_number_of_surfaces];
+ struct bw_fixed dmif_buffer_transfer_time[maximum_number_of_surfaces];
+ struct bw_fixed displays_with_same_mode[maximum_number_of_surfaces];
+ struct bw_fixed stutter_dmif_buffer_size[maximum_number_of_surfaces];
+ struct bw_fixed stutter_refresh_duration[maximum_number_of_surfaces];
+ struct bw_fixed stutter_exit_watermark[maximum_number_of_surfaces];
+ struct bw_fixed stutter_entry_watermark[maximum_number_of_surfaces];
+ struct bw_fixed h_total[maximum_number_of_surfaces];
+ struct bw_fixed v_total[maximum_number_of_surfaces];
+ struct bw_fixed pixel_rate[maximum_number_of_surfaces];
+ struct bw_fixed src_width[maximum_number_of_surfaces];
+ struct bw_fixed pitch_in_pixels[maximum_number_of_surfaces];
+ struct bw_fixed pitch_in_pixels_after_surface_type[maximum_number_of_surfaces];
+ struct bw_fixed src_height[maximum_number_of_surfaces];
+ struct bw_fixed scale_ratio[maximum_number_of_surfaces];
+ struct bw_fixed h_taps[maximum_number_of_surfaces];
+ struct bw_fixed v_taps[maximum_number_of_surfaces];
+ struct bw_fixed h_scale_ratio[maximum_number_of_surfaces];
+ struct bw_fixed v_scale_ratio[maximum_number_of_surfaces];
+ struct bw_fixed rotation_angle[maximum_number_of_surfaces];
+ struct bw_fixed compression_rate[maximum_number_of_surfaces];
+ struct bw_fixed hsr[maximum_number_of_surfaces];
+ struct bw_fixed vsr[maximum_number_of_surfaces];
+ struct bw_fixed source_width_rounded_up_to_chunks[maximum_number_of_surfaces];
+ struct bw_fixed source_width_pixels[maximum_number_of_surfaces];
+ struct bw_fixed source_height_rounded_up_to_chunks[maximum_number_of_surfaces];
+ struct bw_fixed display_bandwidth[maximum_number_of_surfaces];
+ struct bw_fixed request_bandwidth[maximum_number_of_surfaces];
+ struct bw_fixed bytes_per_request[maximum_number_of_surfaces];
+ struct bw_fixed useful_bytes_per_request[maximum_number_of_surfaces];
+ struct bw_fixed lines_interleaved_in_mem_access[maximum_number_of_surfaces];
+ struct bw_fixed latency_hiding_lines[maximum_number_of_surfaces];
+ struct bw_fixed lb_partitions[maximum_number_of_surfaces];
+ struct bw_fixed lb_partitions_max[maximum_number_of_surfaces];
+ struct bw_fixed dispclk_required_with_ramping[maximum_number_of_surfaces];
+ struct bw_fixed dispclk_required_without_ramping[maximum_number_of_surfaces];
+ struct bw_fixed data_buffer_size[maximum_number_of_surfaces];
+ struct bw_fixed outstanding_chunk_request_limit[maximum_number_of_surfaces];
+ struct bw_fixed urgent_watermark[maximum_number_of_surfaces];
+ struct bw_fixed nbp_state_change_watermark[maximum_number_of_surfaces];
+ struct bw_fixed v_filter_init[maximum_number_of_surfaces];
+ struct bw_fixed stutter_cycle_duration[maximum_number_of_surfaces];
+ struct bw_fixed average_bandwidth[maximum_number_of_surfaces];
+ struct bw_fixed average_bandwidth_no_compression[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_pte_request_limit[maximum_number_of_surfaces];
+ struct bw_fixed lb_size_per_component[maximum_number_of_surfaces];
+ struct bw_fixed memory_chunk_size_in_bytes[maximum_number_of_surfaces];
+ struct bw_fixed pipe_chunk_size_in_bytes[maximum_number_of_surfaces];
+ struct bw_fixed number_of_trips_to_memory_for_getting_apte_row[maximum_number_of_surfaces];
+ struct bw_fixed adjusted_data_buffer_size[maximum_number_of_surfaces];
+ struct bw_fixed adjusted_data_buffer_size_in_memory[maximum_number_of_surfaces];
+ struct bw_fixed pixels_per_data_fifo_entry[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_pte_requests_in_row[maximum_number_of_surfaces];
+ struct bw_fixed pte_request_per_chunk[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_page_width[maximum_number_of_surfaces];
+ struct bw_fixed scatter_gather_page_height[maximum_number_of_surfaces];
+ struct bw_fixed lb_lines_in_per_line_out_in_beginning_of_frame[maximum_number_of_surfaces];
+ struct bw_fixed lb_lines_in_per_line_out_in_middle_of_frame[maximum_number_of_surfaces];
+ struct bw_fixed cursor_width_pixels[maximum_number_of_surfaces];
+ struct bw_fixed minimum_latency_hiding[maximum_number_of_surfaces];
+ struct bw_fixed maximum_latency_hiding[maximum_number_of_surfaces];
+ struct bw_fixed minimum_latency_hiding_with_cursor[maximum_number_of_surfaces];
+ struct bw_fixed maximum_latency_hiding_with_cursor[maximum_number_of_surfaces];
+ struct bw_fixed src_pixels_for_first_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed src_pixels_for_last_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed src_data_for_first_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed src_data_for_last_output_pixel[maximum_number_of_surfaces];
+ struct bw_fixed active_time[maximum_number_of_surfaces];
+ struct bw_fixed horizontal_blank_and_chunk_granularity_factor[maximum_number_of_surfaces];
+ struct bw_fixed cursor_latency_hiding[maximum_number_of_surfaces];
+ struct bw_fixed v_blank_dram_speed_change_margin[maximum_number_of_surfaces];
+ uint32_t num_displays_with_margin[3][8];
+ struct bw_fixed dmif_burst_time[3][8];
+ struct bw_fixed mcifwr_burst_time[3][8];
+ struct bw_fixed line_source_transfer_time[maximum_number_of_surfaces][3][8];
+ struct bw_fixed dram_speed_change_line_source_transfer_time[maximum_number_of_surfaces][3][8];
+ struct bw_fixed min_dram_speed_change_margin[3][8];
+ struct bw_fixed dispclk_required_for_dram_speed_change[3][8];
+ struct bw_fixed blackout_duration_margin[3][8];
+ struct bw_fixed dispclk_required_for_blackout_duration[3][8];
+ struct bw_fixed dispclk_required_for_blackout_recovery[3][8];
+ struct bw_fixed dmif_required_sclk_for_urgent_latency[6];
+};
+
+/**
+ * Initialize structures with data that will NOT change at runtime.
+ */
+void bw_calcs_init(
+ struct bw_calcs_dceip *bw_dceip,
+ struct bw_calcs_vbios *bw_vbios,
+ struct hw_asic_id asic_id);
+
+/**
+ * Return:
+ * true  - the display configuration is supported; in this case
+ *         'calcs_output' contains the data needed for HW programming.
+ * false - the display configuration is not supported (not enough bandwidth).
+ */
+bool bw_calcs(
+ struct dc_context *ctx,
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ const struct pipe_ctx *pipe,
+ int pipe_count,
+ struct dce_bw_output *calcs_output);
+
+#endif /* __BANDWIDTH_CALCS_H__ */
+
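
A minimal, hypothetical caller sketch of the bw_calcs() interface declared above; the example_ name is illustrative, and the dceip/vbios/pipe/output objects are assumed to be prepared elsewhere by the DCE resource code:

static bool example_validate_bandwidth(struct dc_context *ctx,
		const struct bw_calcs_dceip *dceip,
		const struct bw_calcs_vbios *vbios,
		const struct pipe_ctx *pipes,
		int pipe_count,
		struct dce_bw_output *out)
{
	/* bw_calcs_init() is assumed to have filled dceip/vbios once at init */
	if (!bw_calcs(ctx, dceip, vbios, pipes, pipe_count, out))
		return false;	/* not enough bandwidth for this configuration */

	/* 'out' now holds the watermark/clock data used for HW programming */
	return true;
}
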
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
new file mode 100644
index 000000000000..1e231f6de732
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * Bandwidth and Watermark calculations interface.
+ * (Refer to "DCEx_mode_support.xlsm" from Perforce.)
+ */
+#ifndef __DCN_CALCS_H__
+#define __DCN_CALCS_H__
+
+#include "bw_fixed.h"
+#include "display_clock.h"
+#include "../dml/display_mode_lib.h"
+
+struct dc;
+struct dc_state;
+
+/*******************************************************************************
+ * DCN data structures.
+ ******************************************************************************/
+
+#define number_of_planes 6
+#define number_of_planes_minus_one 5
+#define number_of_states 4
+#define number_of_states_plus_one 5
+
+#define ddr4_dram_width 64
+#define ddr4_dram_factor_single_Channel 16
+enum dcn_bw_defs {
+ dcn_bw_v_min0p65,
+ dcn_bw_v_mid0p72,
+ dcn_bw_v_nom0p8,
+ dcn_bw_v_max0p9,
+ dcn_bw_v_max0p91,
+ dcn_bw_no_support = 5,
+ dcn_bw_yes,
+ dcn_bw_hor,
+ dcn_bw_vert,
+ dcn_bw_override,
+ dcn_bw_rgb_sub_64,
+ dcn_bw_rgb_sub_32,
+ dcn_bw_rgb_sub_16,
+ dcn_bw_no,
+ dcn_bw_sw_linear,
+ dcn_bw_sw_4_kb_d,
+ dcn_bw_sw_4_kb_d_x,
+ dcn_bw_sw_64_kb_d,
+ dcn_bw_sw_64_kb_d_t,
+ dcn_bw_sw_64_kb_d_x,
+ dcn_bw_sw_var_d,
+ dcn_bw_sw_var_d_x,
+ dcn_bw_yuv420_sub_8,
+ dcn_bw_sw_4_kb_s,
+ dcn_bw_sw_4_kb_s_x,
+ dcn_bw_sw_64_kb_s,
+ dcn_bw_sw_64_kb_s_t,
+ dcn_bw_sw_64_kb_s_x,
+ dcn_bw_writeback,
+ dcn_bw_444,
+ dcn_bw_dp,
+ dcn_bw_420,
+ dcn_bw_hdmi,
+ dcn_bw_sw_var_s,
+ dcn_bw_sw_var_s_x,
+ dcn_bw_yuv420_sub_10,
+ dcn_bw_supported_in_v_active,
+ dcn_bw_supported_in_v_blank,
+ dcn_bw_not_supported,
+ dcn_bw_na,
+ dcn_bw_encoder_8bpc,
+ dcn_bw_encoder_10bpc,
+ dcn_bw_encoder_12bpc,
+ dcn_bw_encoder_16bpc,
+};
+
+/* bounding box parameters */
+/* mode parameters */
+/* system configuration */
+/* display configuration */
+struct dcn_bw_internal_vars {
+ float voltage[number_of_states_plus_one + 1];
+ float max_dispclk[number_of_states_plus_one + 1];
+ float max_dppclk[number_of_states_plus_one + 1];
+ float dcfclk_per_state[number_of_states_plus_one + 1];
+ float phyclk_per_state[number_of_states_plus_one + 1];
+ float fabric_and_dram_bandwidth_per_state[number_of_states_plus_one + 1];
+ float sr_exit_time;
+ float sr_enter_plus_exit_time;
+ float dram_clock_change_latency;
+ float urgent_latency;
+ float write_back_latency;
+ float percent_of_ideal_drambw_received_after_urg_latency;
+ float dcfclkv_max0p9;
+ float dcfclkv_nom0p8;
+ float dcfclkv_mid0p72;
+ float dcfclkv_min0p65;
+ float max_dispclk_vmax0p9;
+ float max_dppclk_vmax0p9;
+ float max_dispclk_vnom0p8;
+ float max_dppclk_vnom0p8;
+ float max_dispclk_vmid0p72;
+ float max_dppclk_vmid0p72;
+ float max_dispclk_vmin0p65;
+ float max_dppclk_vmin0p65;
+ float socclk;
+ float fabric_and_dram_bandwidth_vmax0p9;
+ float fabric_and_dram_bandwidth_vnom0p8;
+ float fabric_and_dram_bandwidth_vmid0p72;
+ float fabric_and_dram_bandwidth_vmin0p65;
+ float round_trip_ping_latency_cycles;
+ float urgent_out_of_order_return_per_channel;
+ float number_of_channels;
+ float vmm_page_size;
+ float return_bus_width;
+ float rob_buffer_size_in_kbyte;
+ float det_buffer_size_in_kbyte;
+ float dpp_output_buffer_pixels;
+ float opp_output_buffer_lines;
+ float pixel_chunk_size_in_kbyte;
+ float pte_chunk_size;
+ float meta_chunk_size;
+ float writeback_chunk_size;
+ enum dcn_bw_defs odm_capability;
+ enum dcn_bw_defs dsc_capability;
+ float line_buffer_size;
+ enum dcn_bw_defs is_line_buffer_bpp_fixed;
+ float line_buffer_fixed_bpp;
+ float max_line_buffer_lines;
+ float writeback_luma_buffer_size;
+ float writeback_chroma_buffer_size;
+ float max_num_dpp;
+ float max_num_writeback;
+ float max_dchub_topscl_throughput;
+ float max_pscl_tolb_throughput;
+ float max_lb_tovscl_throughput;
+ float max_vscl_tohscl_throughput;
+ float max_hscl_ratio;
+ float max_vscl_ratio;
+ float max_hscl_taps;
+ float max_vscl_taps;
+ float under_scan_factor;
+ float phyclkv_max0p9;
+ float phyclkv_nom0p8;
+ float phyclkv_mid0p72;
+ float phyclkv_min0p65;
+ float pte_buffer_size_in_requests;
+ float dispclk_ramping_margin;
+ float downspreading;
+ float max_inter_dcn_tile_repeaters;
+ enum dcn_bw_defs can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ enum dcn_bw_defs bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+ int mode;
+ float viewport_width[number_of_planes_minus_one + 1];
+ float htotal[number_of_planes_minus_one + 1];
+ float vtotal[number_of_planes_minus_one + 1];
+ float v_sync_plus_back_porch[number_of_planes_minus_one + 1];
+ float vactive[number_of_planes_minus_one + 1];
+ float pixel_clock[number_of_planes_minus_one + 1]; /*MHz*/
+ float viewport_height[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs dcc_enable[number_of_planes_minus_one + 1];
+ float dcc_rate[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs source_scan[number_of_planes_minus_one + 1];
+ float lb_bit_per_pixel[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs source_pixel_format[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs source_surface_mode[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs output_format[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs output_deep_color[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs output[number_of_planes_minus_one + 1];
+ float scaler_rec_out_width[number_of_planes_minus_one + 1];
+ float scaler_recout_height[number_of_planes_minus_one + 1];
+ float underscan_output[number_of_planes_minus_one + 1];
+ float interlace_output[number_of_planes_minus_one + 1];
+ float override_hta_ps[number_of_planes_minus_one + 1];
+ float override_vta_ps[number_of_planes_minus_one + 1];
+ float override_hta_pschroma[number_of_planes_minus_one + 1];
+ float override_vta_pschroma[number_of_planes_minus_one + 1];
+ float urgent_latency_support_us[number_of_planes_minus_one + 1];
+ float h_ratio[number_of_planes_minus_one + 1];
+ float v_ratio[number_of_planes_minus_one + 1];
+ float htaps[number_of_planes_minus_one + 1];
+ float vtaps[number_of_planes_minus_one + 1];
+ float hta_pschroma[number_of_planes_minus_one + 1];
+ float vta_pschroma[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs pte_enable;
+ enum dcn_bw_defs synchronized_vblank;
+ enum dcn_bw_defs ta_pscalculation;
+ int voltage_override_level;
+ int number_of_active_planes;
+ int voltage_level;
+ enum dcn_bw_defs immediate_flip_supported;
+ float dcfclk;
+ float max_phyclk;
+ float fabric_and_dram_bandwidth;
+ float dpp_per_plane_per_ratio[1 + 1][number_of_planes_minus_one + 1];
+ enum dcn_bw_defs dispclk_dppclk_support_per_ratio[1 + 1];
+ float required_dispclk_per_ratio[1 + 1];
+ enum dcn_bw_defs error_message[1 + 1];
+ int dispclk_dppclk_ratio;
+ float dpp_per_plane[number_of_planes_minus_one + 1];
+ float det_buffer_size_y[number_of_planes_minus_one + 1];
+ float det_buffer_size_c[number_of_planes_minus_one + 1];
+ float swath_height_y[number_of_planes_minus_one + 1];
+ float swath_height_c[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs final_error_message;
+ float frequency;
+ float header_line;
+ float header;
+ enum dcn_bw_defs voltage_override;
+ enum dcn_bw_defs allow_different_hratio_vratio;
+ float acceptable_quality_hta_ps;
+ float acceptable_quality_vta_ps;
+ float no_of_dpp[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float swath_width_yper_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float swath_height_yper_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float swath_height_cper_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float urgent_latency_support_us_per_state[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_ywith_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_cwith_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float required_prefetch_pixel_data_bw_with_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_ywithout_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float v_ratio_pre_cwithout_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ float required_prefetch_pixel_data_bw_without_immediate_flip[number_of_states_plus_one + 1][1 + 1][number_of_planes_minus_one + 1];
+ enum dcn_bw_defs prefetch_supported_with_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs prefetch_supported_without_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs v_ratio_in_prefetch_supported_with_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs v_ratio_in_prefetch_supported_without_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ float required_dispclk[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs dispclk_dppclk_support[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs total_available_pipes_support[number_of_states_plus_one + 1][1 + 1];
+ float total_number_of_active_dpp[number_of_states_plus_one + 1][1 + 1];
+ float total_number_of_dcc_active_dpp[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs urgent_latency_support[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs mode_support_with_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ enum dcn_bw_defs mode_support_without_immediate_flip[number_of_states_plus_one + 1][1 + 1];
+ float return_bw_per_state[number_of_states_plus_one + 1];
+ enum dcn_bw_defs dio_support[number_of_states_plus_one + 1];
+ float urgent_round_trip_and_out_of_order_latency_per_state[number_of_states_plus_one + 1];
+ enum dcn_bw_defs rob_support[number_of_states_plus_one + 1];
+ enum dcn_bw_defs bandwidth_support[number_of_states_plus_one + 1];
+ float prefetch_bw[number_of_planes_minus_one + 1];
+ float meta_pte_bytes_per_frame[number_of_planes_minus_one + 1];
+ float meta_row_bytes[number_of_planes_minus_one + 1];
+ float dpte_bytes_per_row[number_of_planes_minus_one + 1];
+ float prefetch_lines_y[number_of_planes_minus_one + 1];
+ float prefetch_lines_c[number_of_planes_minus_one + 1];
+ float max_num_sw_y[number_of_planes_minus_one + 1];
+ float max_num_sw_c[number_of_planes_minus_one + 1];
+ float line_times_for_prefetch[number_of_planes_minus_one + 1];
+ float lines_for_meta_pte_with_immediate_flip[number_of_planes_minus_one + 1];
+ float lines_for_meta_pte_without_immediate_flip[number_of_planes_minus_one + 1];
+ float lines_for_meta_and_dpte_row_with_immediate_flip[number_of_planes_minus_one + 1];
+ float lines_for_meta_and_dpte_row_without_immediate_flip[number_of_planes_minus_one + 1];
+ float min_dppclk_using_single_dpp[number_of_planes_minus_one + 1];
+ float swath_width_ysingle_dpp[number_of_planes_minus_one + 1];
+ float byte_per_pixel_in_dety[number_of_planes_minus_one + 1];
+ float byte_per_pixel_in_detc[number_of_planes_minus_one + 1];
+ float number_of_dpp_required_for_det_and_lb_size[number_of_planes_minus_one + 1];
+ float required_phyclk[number_of_planes_minus_one + 1];
+ float read256_block_height_y[number_of_planes_minus_one + 1];
+ float read256_block_width_y[number_of_planes_minus_one + 1];
+ float read256_block_height_c[number_of_planes_minus_one + 1];
+ float read256_block_width_c[number_of_planes_minus_one + 1];
+ float max_swath_height_y[number_of_planes_minus_one + 1];
+ float max_swath_height_c[number_of_planes_minus_one + 1];
+ float min_swath_height_y[number_of_planes_minus_one + 1];
+ float min_swath_height_c[number_of_planes_minus_one + 1];
+ float read_bandwidth[number_of_planes_minus_one + 1];
+ float write_bandwidth[number_of_planes_minus_one + 1];
+ float pscl_factor[number_of_planes_minus_one + 1];
+ float pscl_factor_chroma[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs scale_ratio_support;
+ enum dcn_bw_defs source_format_pixel_and_scan_support;
+ float total_read_bandwidth_consumed_gbyte_per_second;
+ float total_write_bandwidth_consumed_gbyte_per_second;
+ float total_bandwidth_consumed_gbyte_per_second;
+ enum dcn_bw_defs dcc_enabled_in_any_plane;
+ float return_bw_todcn_per_state;
+ float critical_point;
+ enum dcn_bw_defs writeback_latency_support;
+ float required_output_bw;
+ float total_number_of_active_writeback;
+ enum dcn_bw_defs total_available_writeback_support;
+ float maximum_swath_width;
+ float number_of_dpp_required_for_det_size;
+ float number_of_dpp_required_for_lb_size;
+ float min_dispclk_using_single_dpp;
+ float min_dispclk_using_dual_dpp;
+ enum dcn_bw_defs viewport_size_support;
+ float swath_width_granularity_y;
+ float rounded_up_max_swath_size_bytes_y;
+ float swath_width_granularity_c;
+ float rounded_up_max_swath_size_bytes_c;
+ float lines_in_det_luma;
+ float lines_in_det_chroma;
+ float effective_lb_latency_hiding_source_lines_luma;
+ float effective_lb_latency_hiding_source_lines_chroma;
+ float effective_detlb_lines_luma;
+ float effective_detlb_lines_chroma;
+ float projected_dcfclk_deep_sleep;
+ float meta_req_height_y;
+ float meta_req_width_y;
+ float meta_surface_width_y;
+ float meta_surface_height_y;
+ float meta_pte_bytes_per_frame_y;
+ float meta_row_bytes_y;
+ float macro_tile_block_size_bytes_y;
+ float macro_tile_block_height_y;
+ float data_pte_req_height_y;
+ float data_pte_req_width_y;
+ float dpte_bytes_per_row_y;
+ float meta_req_height_c;
+ float meta_req_width_c;
+ float meta_surface_width_c;
+ float meta_surface_height_c;
+ float meta_pte_bytes_per_frame_c;
+ float meta_row_bytes_c;
+ float macro_tile_block_size_bytes_c;
+ float macro_tile_block_height_c;
+ float macro_tile_block_width_c;
+ float data_pte_req_height_c;
+ float data_pte_req_width_c;
+ float dpte_bytes_per_row_c;
+ float v_init_y;
+ float max_partial_sw_y;
+ float v_init_c;
+ float max_partial_sw_c;
+ float dst_x_after_scaler;
+ float dst_y_after_scaler;
+ float time_calc;
+ float v_update_offset[number_of_planes_minus_one + 1];
+ float total_repeater_delay;
+ float v_update_width[number_of_planes_minus_one + 1];
+ float v_ready_offset[number_of_planes_minus_one + 1];
+ float time_setup;
+ float extra_latency;
+ float maximum_vstartup;
+ float bw_available_for_immediate_flip;
+ float total_immediate_flip_bytes[number_of_planes_minus_one + 1];
+ float time_for_meta_pte_with_immediate_flip;
+ float time_for_meta_pte_without_immediate_flip;
+ float time_for_meta_and_dpte_row_with_immediate_flip;
+ float time_for_meta_and_dpte_row_without_immediate_flip;
+ float line_times_to_request_prefetch_pixel_data_with_immediate_flip;
+ float line_times_to_request_prefetch_pixel_data_without_immediate_flip;
+ float maximum_read_bandwidth_with_prefetch_with_immediate_flip;
+ float maximum_read_bandwidth_with_prefetch_without_immediate_flip;
+ float voltage_level_with_immediate_flip;
+ float voltage_level_without_immediate_flip;
+ float total_number_of_active_dpp_per_ratio[1 + 1];
+ float byte_per_pix_dety;
+ float byte_per_pix_detc;
+ float read256_bytes_block_height_y;
+ float read256_bytes_block_width_y;
+ float read256_bytes_block_height_c;
+ float read256_bytes_block_width_c;
+ float maximum_swath_height_y;
+ float maximum_swath_height_c;
+ float minimum_swath_height_y;
+ float minimum_swath_height_c;
+ float swath_width;
+ float prefetch_bandwidth[number_of_planes_minus_one + 1];
+ float v_init_pre_fill_y[number_of_planes_minus_one + 1];
+ float v_init_pre_fill_c[number_of_planes_minus_one + 1];
+ float max_num_swath_y[number_of_planes_minus_one + 1];
+ float max_num_swath_c[number_of_planes_minus_one + 1];
+ float prefill_y[number_of_planes_minus_one + 1];
+ float prefill_c[number_of_planes_minus_one + 1];
+ float v_startup[number_of_planes_minus_one + 1];
+ enum dcn_bw_defs allow_dram_clock_change_during_vblank[number_of_planes_minus_one + 1];
+ float allow_dram_self_refresh_during_vblank[number_of_planes_minus_one + 1];
+ float v_ratio_prefetch_y[number_of_planes_minus_one + 1];
+ float v_ratio_prefetch_c[number_of_planes_minus_one + 1];
+ float destination_lines_for_prefetch[number_of_planes_minus_one + 1];
+ float destination_lines_to_request_vm_inv_blank[number_of_planes_minus_one + 1];
+ float destination_lines_to_request_row_in_vblank[number_of_planes_minus_one + 1];
+ float min_ttuv_blank[number_of_planes_minus_one + 1];
+ float byte_per_pixel_dety[number_of_planes_minus_one + 1];
+ float byte_per_pixel_detc[number_of_planes_minus_one + 1];
+ float swath_width_y[number_of_planes_minus_one + 1];
+ float lines_in_dety[number_of_planes_minus_one + 1];
+ float lines_in_dety_rounded_down_to_swath[number_of_planes_minus_one + 1];
+ float lines_in_detc[number_of_planes_minus_one + 1];
+ float lines_in_detc_rounded_down_to_swath[number_of_planes_minus_one + 1];
+ float full_det_buffering_time_y[number_of_planes_minus_one + 1];
+ float full_det_buffering_time_c[number_of_planes_minus_one + 1];
+ float active_dram_clock_change_latency_margin[number_of_planes_minus_one + 1];
+ float v_blank_dram_clock_change_latency_margin[number_of_planes_minus_one + 1];
+ float dcfclk_deep_sleep_per_plane[number_of_planes_minus_one + 1];
+ float read_bandwidth_plane_luma[number_of_planes_minus_one + 1];
+ float read_bandwidth_plane_chroma[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_luma[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_chroma[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_luma_prefetch[number_of_planes_minus_one + 1];
+ float display_pipe_line_delivery_time_chroma_prefetch[number_of_planes_minus_one + 1];
+ float pixel_pte_bytes_per_row[number_of_planes_minus_one + 1];
+ float meta_pte_bytes_frame[number_of_planes_minus_one + 1];
+ float meta_row_byte[number_of_planes_minus_one + 1];
+ float prefetch_source_lines_y[number_of_planes_minus_one + 1];
+ float prefetch_source_lines_c[number_of_planes_minus_one + 1];
+ float pscl_throughput[number_of_planes_minus_one + 1];
+ float pscl_throughput_chroma[number_of_planes_minus_one + 1];
+ float output_bpphdmi[number_of_planes_minus_one + 1];
+ float output_bppdp4_lane_hbr[number_of_planes_minus_one + 1];
+ float output_bppdp4_lane_hbr2[number_of_planes_minus_one + 1];
+ float output_bppdp4_lane_hbr3[number_of_planes_minus_one + 1];
+ float max_vstartup_lines[number_of_planes_minus_one + 1];
+ float dispclk_with_ramping;
+ float dispclk_without_ramping;
+ float dppclk_using_single_dpp_luma;
+ float dppclk_using_single_dpp;
+ float dppclk_using_single_dpp_chroma;
+ enum dcn_bw_defs odm_capable;
+ float dispclk;
+ float dppclk;
+ float return_bandwidth_to_dcn;
+ enum dcn_bw_defs dcc_enabled_any_plane;
+ float return_bw;
+ float critical_compression;
+ float total_data_read_bandwidth;
+ float total_active_dpp;
+ float total_dcc_active_dpp;
+ float urgent_round_trip_and_out_of_order_latency;
+ float last_pixel_of_line_extra_watermark;
+ float data_fabric_line_delivery_time_luma;
+ float data_fabric_line_delivery_time_chroma;
+ float urgent_extra_latency;
+ float urgent_watermark;
+ float ptemeta_urgent_watermark;
+ float dram_clock_change_watermark;
+ float total_active_writeback;
+ float writeback_dram_clock_change_watermark;
+ float min_full_det_buffering_time;
+ float frame_time_for_min_full_det_buffering_time;
+ float average_read_bandwidth_gbyte_per_second;
+ float part_of_burst_that_fits_in_rob;
+ float stutter_burst_time;
+ float stutter_efficiency_not_including_vblank;
+ float smallest_vblank;
+ float v_blank_time;
+ float stutter_efficiency;
+ float dcf_clk_deep_sleep;
+ float stutter_exit_watermark;
+ float stutter_enter_plus_exit_watermark;
+ float effective_det_plus_lb_lines_luma;
+ float urgent_latency_support_us_luma;
+ float effective_det_plus_lb_lines_chroma;
+ float urgent_latency_support_us_chroma;
+ float min_urgent_latency_support_us;
+ float non_urgent_latency_tolerance;
+ float block_height256_bytes_y;
+ float block_height256_bytes_c;
+ float meta_request_width_y;
+ float meta_surf_width_y;
+ float meta_surf_height_y;
+ float meta_pte_bytes_frame_y;
+ float meta_row_byte_y;
+ float macro_tile_size_byte_y;
+ float macro_tile_height_y;
+ float pixel_pte_req_height_y;
+ float pixel_pte_req_width_y;
+ float pixel_pte_bytes_per_row_y;
+ float meta_request_width_c;
+ float meta_surf_width_c;
+ float meta_surf_height_c;
+ float meta_pte_bytes_frame_c;
+ float meta_row_byte_c;
+ float macro_tile_size_bytes_c;
+ float macro_tile_height_c;
+ float pixel_pte_req_height_c;
+ float pixel_pte_req_width_c;
+ float pixel_pte_bytes_per_row_c;
+ float max_partial_swath_y;
+ float max_partial_swath_c;
+ float t_calc;
+ float next_prefetch_mode;
+ float v_startup_lines;
+ enum dcn_bw_defs planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw;
+ enum dcn_bw_defs planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4;
+ enum dcn_bw_defs planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2;
+ enum dcn_bw_defs v_ratio_prefetch_more_than4;
+ enum dcn_bw_defs destination_line_times_for_prefetch_less_than2;
+ float prefetch_mode;
+ float dstx_after_scaler;
+ float dsty_after_scaler;
+ float v_update_offset_pix;
+ float total_repeater_delay_time;
+ float v_update_width_pix;
+ float v_ready_offset_pix;
+ float t_setup;
+ float t_wait;
+ float bandwidth_available_for_immediate_flip;
+ float tot_immediate_flip_bytes;
+ float max_rd_bandwidth;
+ float time_for_fetching_meta_pte;
+ float time_for_fetching_row_in_vblank;
+ float lines_to_request_prefetch_pixel_data;
+ float required_prefetch_pix_data_bw;
+ enum dcn_bw_defs prefetch_mode_supported;
+ float active_dp_ps;
+ float lb_latency_hiding_source_lines_y;
+ float lb_latency_hiding_source_lines_c;
+ float effective_lb_latency_hiding_y;
+ float effective_lb_latency_hiding_c;
+ float dpp_output_buffer_lines_y;
+ float dpp_output_buffer_lines_c;
+ float dppopp_buffering_y;
+ float max_det_buffering_time_y;
+ float active_dram_clock_change_latency_margin_y;
+ float dppopp_buffering_c;
+ float max_det_buffering_time_c;
+ float active_dram_clock_change_latency_margin_c;
+ float writeback_dram_clock_change_latency_margin;
+ float min_active_dram_clock_change_margin;
+ float v_blank_of_min_active_dram_clock_change_margin;
+ float second_min_active_dram_clock_change_margin;
+ float min_vblank_dram_clock_change_margin;
+ float dram_clock_change_margin;
+ float dram_clock_change_support;
+ float wr_bandwidth;
+ float max_used_bw;
+};
+
+struct dcn_soc_bounding_box {
+ float sr_exit_time; /*us*/
+ float sr_enter_plus_exit_time; /*us*/
+ float urgent_latency; /*us*/
+ float write_back_latency; /*us*/
+ float percent_of_ideal_drambw_received_after_urg_latency; /*%*/
+ int max_request_size; /*bytes*/
+ float dcfclkv_max0p9; /*MHz*/
+ float dcfclkv_nom0p8; /*MHz*/
+ float dcfclkv_mid0p72; /*MHz*/
+ float dcfclkv_min0p65; /*MHz*/
+ float max_dispclk_vmax0p9; /*MHz*/
+ float max_dispclk_vmid0p72; /*MHz*/
+ float max_dispclk_vnom0p8; /*MHz*/
+ float max_dispclk_vmin0p65; /*MHz*/
+ float max_dppclk_vmax0p9; /*MHz*/
+ float max_dppclk_vnom0p8; /*MHz*/
+ float max_dppclk_vmid0p72; /*MHz*/
+ float max_dppclk_vmin0p65; /*MHz*/
+ float socclk; /*MHz*/
+ float fabric_and_dram_bandwidth_vmax0p9; /*GB/s*/
+ float fabric_and_dram_bandwidth_vnom0p8; /*GB/s*/
+ float fabric_and_dram_bandwidth_vmid0p72; /*GB/s*/
+ float fabric_and_dram_bandwidth_vmin0p65; /*GB/s*/
+ float phyclkv_max0p9; /*MHz*/
+ float phyclkv_nom0p8; /*MHz*/
+ float phyclkv_mid0p72; /*MHz*/
+ float phyclkv_min0p65; /*MHz*/
+ float downspreading; /*%*/
+ int round_trip_ping_latency_cycles; /*DCFCLK Cycles*/
+ int urgent_out_of_order_return_per_channel; /*bytes*/
+ int number_of_channels;
+ int vmm_page_size; /*bytes*/
+ float dram_clock_change_latency; /*us*/
+ int return_bus_width; /*bytes*/
+ float percent_disp_bw_limit; /*%*/
+};
+extern const struct dcn_soc_bounding_box dcn10_soc_defaults;
+
+struct dcn_ip_params {
+ float rob_buffer_size_in_kbyte;
+ float det_buffer_size_in_kbyte;
+ float dpp_output_buffer_pixels;
+ float opp_output_buffer_lines;
+ float pixel_chunk_size_in_kbyte;
+ enum dcn_bw_defs pte_enable;
+ int pte_chunk_size; /*kbytes*/
+ int meta_chunk_size; /*kbytes*/
+ int writeback_chunk_size; /*kbytes*/
+ enum dcn_bw_defs odm_capability;
+ enum dcn_bw_defs dsc_capability;
+ int line_buffer_size; /*bit*/
+ int max_line_buffer_lines;
+ enum dcn_bw_defs is_line_buffer_bpp_fixed;
+ int line_buffer_fixed_bpp;
+ int writeback_luma_buffer_size; /*kbytes*/
+ int writeback_chroma_buffer_size; /*kbytes*/
+ int max_num_dpp;
+ int max_num_writeback;
+ int max_dchub_topscl_throughput; /*pixels/dppclk*/
+ int max_pscl_tolb_throughput; /*pixels/dppclk*/
+ int max_lb_tovscl_throughput; /*pixels/dppclk*/
+ int max_vscl_tohscl_throughput; /*pixels/dppclk*/
+ float max_hscl_ratio;
+ float max_vscl_ratio;
+ int max_hscl_taps;
+ int max_vscl_taps;
+ int pte_buffer_size_in_requests;
+ float dispclk_ramping_margin; /*%*/
+ float under_scan_factor;
+ int max_inter_dcn_tile_repeaters;
+ enum dcn_bw_defs can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
+ enum dcn_bw_defs bug_forcing_luma_and_chroma_request_to_same_size_fixed;
+ int dcfclk_cstate_latency;
+};
+extern const struct dcn_ip_params dcn10_ip_defaults;
+
+bool dcn_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
+
+unsigned int dcn_find_dcfclk_suits_all(
+ const struct dc *dc,
+ struct clocks_value *clocks);
+
+void dcn_bw_update_from_pplib(struct dc *dc);
+void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
+void dcn_bw_sync_calcs_and_dml(struct dc *dc);
+
+#endif /* __DCN_CALCS_H__ */
+
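
A hypothetical sketch of how the helpers declared above might be combined by a caller; the ordering is illustrative only, and dc/context are assumed to come from the resource-management code:

/*
 * Illustrative validation flow: refresh clock limits from pplib, mirror
 * the parameters into DML, then ask the DCN calculator whether the new
 * state fits within the SoC bounding box.
 */
static bool example_dcn_check_state(struct dc *dc, struct dc_state *context)
{
	dcn_bw_update_from_pplib(dc);
	dcn_bw_sync_calcs_and_dml(dc);

	if (!dcn_validate_bandwidth(dc, context))
		return false;	/* configuration not supported */

	dcn_bw_notify_pplib_of_wm_ranges(dc);
	return true;
}
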
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
new file mode 100644
index 000000000000..c93b9b9a817c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -0,0 +1,48 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_ABM_H__
+#define __DC_ABM_H__
+
+#include "dm_services_types.h"
+
+struct abm {
+ struct dc_context *ctx;
+ const struct abm_funcs *funcs;
+};
+
+struct abm_funcs {
+ void (*abm_init)(struct abm *abm);
+ bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
+ bool (*set_abm_immediate_disable)(struct abm *abm);
+ bool (*init_backlight)(struct abm *abm);
+ bool (*set_backlight_level)(struct abm *abm,
+ unsigned int backlight_level,
+ unsigned int frame_ramp,
+ unsigned int controller_id);
+ unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
+ bool (*is_dmcu_initialized)(struct abm *abm);
+};
+
+#endif
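
A hypothetical helper showing how the abm_funcs vtable above might be driven; the example_ name and parameters are illustrative, and an initialized struct abm is assumed:

static bool example_set_backlight(struct abm *abm, unsigned int level,
		unsigned int frame_ramp, unsigned int controller_id)
{
	/* the DMCU must be up before backlight programming makes sense */
	if (!abm || !abm->funcs->is_dmcu_initialized(abm))
		return false;

	return abm->funcs->set_backlight_level(abm, level, frame_ramp,
			controller_id);
}
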
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
new file mode 100644
index 000000000000..925204f49717
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/audio.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUDIO_H__
+#define __DAL_AUDIO_H__
+
+#include "audio_types.h"
+
+struct audio;
+
+struct audio_funcs {
+
+ bool (*endpoint_valid)(struct audio *audio);
+
+ void (*hw_init)(struct audio *audio);
+
+ void (*az_enable)(struct audio *audio);
+
+ void (*az_disable)(struct audio *audio);
+
+ void (*az_configure)(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_info *audio_info);
+
+ void (*wall_dto_setup)(struct audio *audio,
+ enum signal_type signal,
+ const struct audio_crtc_info *crtc_info,
+ const struct audio_pll_info *pll_info);
+
+ void (*destroy)(struct audio **audio);
+};
+
+struct audio {
+ const struct audio_funcs *funcs;
+ struct dc_context *ctx;
+ unsigned int inst;
+};
+
+#endif /* __DAL_AUDIO_H__ */
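
A hypothetical caller sketch for the audio_funcs interface above; signal, crtc_info and audio_info are assumed to be built by the stream programming code:

static void example_enable_audio(struct audio *audio, enum signal_type signal,
		const struct audio_crtc_info *crtc_info,
		const struct audio_info *audio_info)
{
	/* skip endpoints that are not usable on this ASIC */
	if (!audio->funcs->endpoint_valid(audio))
		return;

	audio->funcs->az_configure(audio, signal, crtc_info, audio_info);
	audio->funcs->az_enable(audio);
}
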
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
new file mode 100644
index 000000000000..f5f69cd81f6f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DISPLAY_CLOCK_H__
+#define __DISPLAY_CLOCK_H__
+
+#include "dm_services_types.h"
+
+
+struct clocks_value {
+ int dispclk_in_khz;
+ int max_pixelclk_in_khz;
+ int max_non_dp_phyclk_in_khz;
+ int max_dp_phyclk_in_khz;
+ bool dispclk_notify_pplib_done;
+ bool pixelclk_notify_pplib_done;
+ bool phyclk_notigy_pplib_done;
+ int dcfclock_in_khz;
+ int dppclk_in_khz;
+ int mclk_in_khz;
+ int phyclk_in_khz;
+ int common_vdd_level;
+};
+
+
+/* Structure containing all state-dependent clocks
+ * (dependent on "enum dm_pp_clocks_state") */
+struct state_dependent_clocks {
+ int display_clk_khz;
+ int pixel_clk_khz;
+};
+
+struct display_clock {
+ struct dc_context *ctx;
+ const struct display_clock_funcs *funcs;
+
+ enum dm_pp_clocks_state max_clks_state;
+ enum dm_pp_clocks_state cur_min_clks_state;
+ struct clocks_value cur_clocks_value;
+};
+
+struct display_clock_funcs {
+ int (*set_clock)(struct display_clock *disp_clk,
+ int requested_clock_khz);
+
+ enum dm_pp_clocks_state (*get_required_clocks_state)(
+ struct display_clock *disp_clk,
+ struct state_dependent_clocks *req_clocks);
+
+ bool (*set_min_clocks_state)(struct display_clock *disp_clk,
+ enum dm_pp_clocks_state dm_pp_clocks_state);
+
+ int (*get_dp_ref_clk_frequency)(struct display_clock *disp_clk);
+
+ bool (*apply_clock_voltage_request)(
+ struct display_clock *disp_clk,
+ enum dm_pp_clock_type clocks_type,
+ int clocks_in_khz,
+ bool pre_mode_set,
+ bool update_dp_phyclk);
+};
+
+#endif /* __DISPLAY_CLOCK_H__ */
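
A hypothetical sequence using the display_clock_funcs vtable above: derive the minimum clock state needed for the requested clocks, raise it, then program the display clock. The example_ name and the *_khz arguments are placeholders:

static void example_program_display_clock(struct display_clock *disp_clk,
		int requested_dispclk_khz, int requested_pixclk_khz)
{
	struct state_dependent_clocks req = {
		.display_clk_khz = requested_dispclk_khz,
		.pixel_clk_khz = requested_pixclk_khz,
	};
	enum dm_pp_clocks_state state =
		disp_clk->funcs->get_required_clocks_state(disp_clk, &req);

	/* only program the clock once the minimum state has been raised */
	if (disp_clk->funcs->set_min_clocks_state(disp_clk, state))
		disp_clk->funcs->set_clock(disp_clk, requested_dispclk_khz);
}
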
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
new file mode 100644
index 000000000000..0574c29cc4a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -0,0 +1,50 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DMCU_H__
+#define __DC_DMCU_H__
+
+#include "dm_services_types.h"
+
+struct dmcu {
+ struct dc_context *ctx;
+ const struct dmcu_funcs *funcs;
+};
+
+struct dmcu_funcs {
+ bool (*load_iram)(struct dmcu *dmcu,
+ unsigned int start_offset,
+ const char *src,
+ unsigned int bytes);
+ void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
+ void (*setup_psr)(struct dmcu *dmcu,
+ struct dc_link *link,
+ struct psr_context *psr_context);
+ void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
+ void (*set_psr_wait_loop)(struct dmcu *dmcu,
+ unsigned int wait_loop_number);
+ void (*get_psr_wait_loop)(unsigned int *psr_wait_loop_number);
+};
+
+#endif
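
A hypothetical PSR bring-up sketch using the dmcu_funcs vtable above; link and psr_context are assumed to be provided by the link code:

static void example_enable_psr(struct dmcu *dmcu, struct dc_link *link,
		struct psr_context *psr_context)
{
	/* program the PSR parameters for this link, then enable PSR
	 * without waiting for the state transition */
	dmcu->funcs->setup_psr(dmcu, link, psr_context);
	dmcu->funcs->set_psr_enable(dmcu, true, false);
}
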
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
new file mode 100644
index 000000000000..83a68460edcd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DAL_DPP_H__
+#define __DAL_DPP_H__
+
+#include "transform.h"
+
+struct dpp {
+ const struct dpp_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+ struct dpp_caps *caps;
+ struct pwl_params regamma_params;
+};
+
+struct dpp_grph_csc_adjustment {
+ struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
+ enum graphics_gamut_adjust_type gamut_adjust_type;
+};
+
+struct dpp_funcs {
+ void (*dpp_reset)(struct dpp *dpp);
+
+ void (*dpp_set_scaler)(struct dpp *dpp,
+ const struct scaler_data *scl_data);
+
+ void (*dpp_set_pixel_storage_depth)(
+ struct dpp *dpp,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params);
+
+ bool (*dpp_get_optimal_number_of_taps)(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+ void (*dpp_set_gamut_remap)(
+ struct dpp *dpp,
+ const struct dpp_grph_csc_adjustment *adjust);
+
+ void (*opp_set_csc_default)(
+ struct dpp *dpp,
+ const struct default_adjustment *default_adjust);
+
+ void (*opp_set_csc_adjustment)(
+ struct dpp *dpp,
+ const struct out_csc_color_matrix *tbl_entry);
+
+ void (*opp_power_on_regamma_lut)(
+ struct dpp *dpp,
+ bool power_on);
+
+ void (*opp_program_regamma_lut)(
+ struct dpp *dpp,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+
+ void (*opp_configure_regamma_lut)(
+ struct dpp *dpp,
+ bool is_ram_a);
+
+ void (*opp_program_regamma_lutb_settings)(
+ struct dpp *dpp,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_luta_settings)(
+ struct dpp *dpp,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_pwl)(
+ struct dpp *dpp, const struct pwl_params *params);
+
+ void (*opp_set_regamma_mode)(
+ struct dpp *dpp_base,
+ enum opp_regamma mode);
+
+ void (*ipp_set_degamma)(
+ struct dpp *dpp_base,
+ enum ipp_degamma_mode mode);
+
+ void (*ipp_program_input_lut)(
+ struct dpp *dpp_base,
+ const struct dc_gamma *gamma);
+
+ void (*ipp_program_degamma_pwl)(struct dpp *dpp_base,
+ const struct pwl_params *params);
+
+ void (*ipp_setup)(
+ struct dpp *dpp_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+ void (*ipp_full_bypass)(struct dpp *dpp_base);
+
+ void (*set_cursor_attributes)(
+ struct dpp *dpp_base,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct dpp *dpp_base,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param,
+ uint32_t width
+ );
+
+};
+
+
+
+#endif
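
A hypothetical helper for the dpp_funcs vtable above: let the DPP pick usable tap counts for the requested scaling, then program the scaler. scl_data and in_taps are assumed to be filled in by the pipe programming code:

static bool example_program_dpp_scaler(struct dpp *dpp,
		struct scaler_data *scl_data,
		const struct scaling_taps *in_taps)
{
	if (!dpp->funcs->dpp_get_optimal_number_of_taps(dpp, scl_data, in_taps))
		return false;	/* requested scaling cannot be supported */

	dpp->funcs->dpp_set_scaler(dpp, scl_data);
	return true;
}
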
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
new file mode 100644
index 000000000000..90d0148430fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_H__
+#define __DAL_GPIO_H__
+
+#include "gpio_types.h"
+
+struct gpio {
+ struct gpio_service *service;
+ struct hw_gpio_pin *pin;
+ enum gpio_id id;
+ uint32_t en;
+ enum gpio_mode mode;
+ /* when GPIO comes from VBIOS, it has a defined output state */
+ enum gpio_pin_output_state output_state;
+};
+
+#if 0
+struct gpio_funcs {
+
+ struct hw_gpio_pin *(*create_ddc_data)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_ddc_clock)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_generic)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_hpd)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_gpio_pad)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_sync)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+ struct hw_gpio_pin *(*create_gsl)(
+ struct dc_context *ctx,
+ enum gpio_id id,
+ uint32_t en);
+
+ /* HW translation */
+ bool (*offset_to_id)(
+ uint32_t offset,
+ uint32_t mask,
+ enum gpio_id *id,
+ uint32_t *en);
+ bool (*id_to_offset)(
+ enum gpio_id id,
+ uint32_t en,
+ struct gpio_pin_info *info);
+};
+#endif
+
+#endif /* __DAL_GPIO_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
new file mode 100644
index 000000000000..0d186be24cf4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HUBP_H__
+#define __DAL_HUBP_H__
+
+#include "mem_input.h"
+
+struct hubp {
+ struct hubp_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_plane_address request_address;
+ struct dc_plane_address current_address;
+ int inst;
+ int opp_id;
+ int mpcc_id;
+ struct dc_cursor_attributes curs_attr;
+};
+
+
+struct hubp_funcs {
+ void (*hubp_setup)(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+ void (*dcc_control)(struct hubp *hubp, bool enable,
+ bool independent_64b_blks);
+ void (*mem_program_viewport)(
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+ bool (*hubp_program_surface_flip_and_addr)(
+ struct hubp *hubp,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+ void (*hubp_program_pte_vm)(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation);
+
+ void (*hubp_set_vm_system_aperture_settings)(
+ struct hubp *hubp,
+ struct vm_system_aperture_param *apt);
+
+ void (*hubp_set_vm_context0_settings)(
+ struct hubp *hubp,
+ const struct vm_context0_param *vm0);
+
+ void (*hubp_program_surface_config)(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+ bool (*hubp_is_flip_pending)(struct hubp *hubp);
+
+ void (*hubp_update_dchub)(struct hubp *hubp,
+ struct dchub_init_data *dh_data);
+
+ void (*set_blank)(struct hubp *hubp, bool blank);
+ void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);
+
+ void (*set_cursor_attributes)(
+ struct hubp *hubp,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct hubp *hubp,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+};
+
+#endif
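
A hypothetical flip helper built on the hubp_funcs vtable above: program the new surface address (flipping on the next vsync rather than immediately) and report whether the flip is still pending. The example_ name is illustrative:

static bool example_flip_surface(struct hubp *hubp,
		const struct dc_plane_address *address)
{
	/* false = do not flip immediately; take effect on vsync */
	hubp->funcs->hubp_program_surface_flip_and_addr(hubp, address, false);

	return hubp->funcs->hubp_is_flip_pending(hubp);
}
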
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
new file mode 100644
index 000000000000..9602f261b614
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_SHARED_H__
+#define __DAL_HW_SHARED_H__
+
+#include "os_types.h"
+#include "fixed31_32.h"
+#include "dc_hw_types.h"
+
+/******************************************************************************
+ * Data types shared between different Virtual HW blocks
+ ******************************************************************************/
+
+#define MAX_PIPES 6
+
+struct gamma_curve {
+ uint32_t offset;
+ uint32_t segments_num;
+};
+
+struct curve_points {
+ struct fixed31_32 x;
+ struct fixed31_32 y;
+ struct fixed31_32 offset;
+ struct fixed31_32 slope;
+
+ uint32_t custom_float_x;
+ uint32_t custom_float_y;
+ uint32_t custom_float_offset;
+ uint32_t custom_float_slope;
+};
+
+struct pwl_result_data {
+ struct fixed31_32 red;
+ struct fixed31_32 green;
+ struct fixed31_32 blue;
+
+ struct fixed31_32 delta_red;
+ struct fixed31_32 delta_green;
+ struct fixed31_32 delta_blue;
+
+ uint32_t red_reg;
+ uint32_t green_reg;
+ uint32_t blue_reg;
+
+ uint32_t delta_red_reg;
+ uint32_t delta_green_reg;
+ uint32_t delta_blue_reg;
+};
+
+struct pwl_params {
+ struct gamma_curve arr_curve_points[34];
+ struct curve_points arr_points[3];
+ struct pwl_result_data rgb_resulted[256 + 3];
+ uint32_t hw_points_num;
+};
+
+/* move to dpp
+ * While we are moving functionality out of opp into dpp to align
+ * HW programming with HW IP, we define these structs in hw_shared
+ * so that we can still compile while refactoring.
+ */
+
+enum lb_pixel_depth {
+ /* do not change the values; they are used as a bit vector */
+ LB_PIXEL_DEPTH_18BPP = 1,
+ LB_PIXEL_DEPTH_24BPP = 2,
+ LB_PIXEL_DEPTH_30BPP = 4,
+ LB_PIXEL_DEPTH_36BPP = 8
+};
+
+enum graphics_csc_adjust_type {
+ GRAPHICS_CSC_ADJUST_TYPE_BYPASS = 0,
+ GRAPHICS_CSC_ADJUST_TYPE_HW, /* without adjustments */
+ GRAPHICS_CSC_ADJUST_TYPE_SW /* use adjustments */
+};
+
+enum ipp_degamma_mode {
+ IPP_DEGAMMA_MODE_BYPASS,
+ IPP_DEGAMMA_MODE_HW_sRGB,
+ IPP_DEGAMMA_MODE_HW_xvYCC,
+ IPP_DEGAMMA_MODE_USER_PWL
+};
+
+enum ipp_output_format {
+ IPP_OUTPUT_FORMAT_12_BIT_FIX,
+ IPP_OUTPUT_FORMAT_16_BIT_BYPASS,
+ IPP_OUTPUT_FORMAT_FLOAT
+};
+
+enum expansion_mode {
+ EXPANSION_MODE_DYNAMIC,
+ EXPANSION_MODE_ZERO
+};
+
+struct default_adjustment {
+ enum lb_pixel_depth lb_color_depth;
+ enum dc_color_space out_color_space;
+ enum dc_color_space in_color_space;
+ enum dc_color_depth color_depth;
+ enum pixel_format surface_pixel_format;
+ enum graphics_csc_adjust_type csc_adjust_type;
+ bool force_hw_default;
+};
+
+struct out_csc_color_matrix {
+ enum dc_color_space color_space;
+ uint16_t regval[12];
+};
+
+enum opp_regamma {
+ OPP_REGAMMA_BYPASS = 0,
+ OPP_REGAMMA_SRGB,
+ OPP_REGAMMA_3_6,
+ OPP_REGAMMA_USER
+};
+
+#endif /* __DAL_HW_SHARED_H__ */
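For illustration only (this is not part of the patch), a minimal sketch of how the shared PWL containers above can be walked; the helper name is hypothetical and the kernel's ARRAY_SIZE() is assumed to be reachable through os_types.h:

#include "hw_shared.h"

/* hypothetical helper: total number of PWL segments described by the
 * gamma_curve layout in pwl_params
 */
static unsigned int total_curve_segments(const struct pwl_params *params)
{
	unsigned int i, total = 0;

	for (i = 0; i < ARRAY_SIZE(params->arr_curve_points); i++)
		total += params->arr_curve_points[i].segments_num;

	return total;
}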
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
new file mode 100644
index 000000000000..f11aa484f46e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IPP_H__
+#define __DAL_IPP_H__
+
+#include "hw_shared.h"
+#include "dc_hw_types.h"
+
+#define MAXTRIX_COEFFICIENTS_NUMBER 12
+#define MAXTRIX_COEFFICIENTS_WRAP_NUMBER (MAXTRIX_COEFFICIENTS_NUMBER + 4)
+#define MAX_OVL_MATRIX_COUNT 12
+
+/* IPP RELATED */
+struct input_pixel_processor {
+ struct dc_context *ctx;
+ unsigned int inst;
+ const struct ipp_funcs *funcs;
+};
+
+enum ipp_prescale_mode {
+ IPP_PRESCALE_MODE_BYPASS,
+ IPP_PRESCALE_MODE_FIXED_SIGNED,
+ IPP_PRESCALE_MODE_FLOAT_SIGNED,
+ IPP_PRESCALE_MODE_FIXED_UNSIGNED,
+ IPP_PRESCALE_MODE_FLOAT_UNSIGNED
+};
+
+struct ipp_prescale_params {
+ enum ipp_prescale_mode mode;
+ uint16_t bias;
+ uint16_t scale;
+};
+
+
+
+enum ovl_color_space {
+ OVL_COLOR_SPACE_UNKNOWN = 0,
+ OVL_COLOR_SPACE_RGB,
+ OVL_COLOR_SPACE_YUV601,
+ OVL_COLOR_SPACE_YUV709
+};
+
+
+struct ipp_funcs {
+
+ /*** cursor ***/
+ void (*ipp_cursor_set_position)(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_position *position,
+ const struct dc_cursor_mi_param *param);
+
+ void (*ipp_cursor_set_attributes)(
+ struct input_pixel_processor *ipp,
+ const struct dc_cursor_attributes *attributes);
+
+ /*** setup input pixel processing ***/
+
+	/* put the entire pixel processor into bypass */
+ void (*ipp_full_bypass)(
+ struct input_pixel_processor *ipp);
+
+	/* set up the ipp to expand/convert the input to the pixel processor's internal format */
+ void (*ipp_setup)(
+ struct input_pixel_processor *ipp,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+	/* DCE function to set up the IPP. TODO: see if we can consolidate this into ipp_setup */
+ void (*ipp_program_prescale)(
+ struct input_pixel_processor *ipp,
+ struct ipp_prescale_params *params);
+
+ void (*ipp_program_input_lut)(
+ struct input_pixel_processor *ipp,
+ const struct dc_gamma *gamma);
+
+ /*** DEGAMMA RELATED ***/
+ void (*ipp_set_degamma)(
+ struct input_pixel_processor *ipp,
+ enum ipp_degamma_mode mode);
+
+ void (*ipp_program_degamma_pwl)(
+ struct input_pixel_processor *ipp,
+ const struct pwl_params *params);
+
+ void (*ipp_destroy)(struct input_pixel_processor **ipp);
+};
+
+#endif /* __DAL_IPP_H__ */
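As an illustrative sketch (not part of the patch), the ipp_funcs table is meant to be driven through its function pointers rather than called directly; the helper name and the parameter choices below are assumptions:

#include "ipp.h"

/* hypothetical helper: bring an IPP instance into a known input state */
static void example_setup_ipp(struct input_pixel_processor *ipp,
		enum surface_pixel_format format)
{
	struct ipp_prescale_params prescale = {
		.mode = IPP_PRESCALE_MODE_BYPASS,
	};

	/* expand/convert the input to the internal pixel format */
	ipp->funcs->ipp_setup(ipp, format, EXPANSION_MODE_DYNAMIC);

	/* DCE-style prescale programming, left in bypass for the example */
	if (ipp->funcs->ipp_program_prescale)
		ipp->funcs->ipp_program_prescale(ipp, &prescale);

	/* use the fixed sRGB degamma curve */
	ipp->funcs->ipp_set_degamma(ipp, IPP_DEGAMMA_MODE_HW_sRGB);
}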
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
new file mode 100644
index 000000000000..3d33bcda7059
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -0,0 +1,134 @@
+/*
+ * link_encoder.h
+ *
+ * Created on: Oct 6, 2015
+ * Author: yonsun
+ */
+
+#ifndef LINK_ENCODER_H_
+#define LINK_ENCODER_H_
+
+#include "grph_object_defs.h"
+#include "signal_types.h"
+#include "dc_types.h"
+
+struct dc_context;
+struct encoder_set_dp_phy_pattern_param;
+struct link_mst_stream_allocation_table;
+struct dc_link_settings;
+struct link_training_settings;
+struct pipe_ctx;
+
+struct encoder_init_data {
+ enum channel_id channel;
+ struct graphics_object_id connector;
+ enum hpd_source_id hpd_source;
+	/* TODO: in DAL2 this was a pointer to EventManagerInterface */
+ struct graphics_object_id encoder;
+ struct dc_context *ctx;
+ enum transmitter transmitter;
+};
+
+struct encoder_feature_support {
+ union {
+ struct {
+ uint32_t IS_HBR2_CAPABLE:1;
+ uint32_t IS_HBR3_CAPABLE:1;
+ uint32_t IS_TPS3_CAPABLE:1;
+ uint32_t IS_TPS4_CAPABLE:1;
+ uint32_t IS_YCBCR_CAPABLE:1;
+ uint32_t HDMI_6GB_EN:1;
+ } bits;
+ uint32_t raw;
+ } flags;
+
+ enum dc_color_depth max_hdmi_deep_color;
+ unsigned int max_hdmi_pixel_clock;
+ bool ycbcr420_supported;
+};
+
+union dpcd_psr_configuration {
+ struct {
+ unsigned char ENABLE : 1;
+ unsigned char TRANSMITTER_ACTIVE_IN_PSR : 1;
+ unsigned char CRC_VERIFICATION : 1;
+ unsigned char FRAME_CAPTURE_INDICATION : 1;
+		/* For eDP 1.4, PSR v2 */
+ unsigned char LINE_CAPTURE_INDICATION : 1;
+		/* For eDP 1.4, PSR v2 */
+ unsigned char IRQ_HPD_WITH_CRC_ERROR : 1;
+ unsigned char RESERVED : 2;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_error_status {
+ struct {
+ unsigned char LINK_CRC_ERROR :1;
+ unsigned char RFB_STORAGE_ERROR :1;
+ unsigned char RESERVED :6;
+ } bits;
+ unsigned char raw;
+};
+
+union psr_sink_psr_status {
+ struct {
+ unsigned char SINK_SELF_REFRESH_STATUS :3;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
+struct link_encoder {
+ const struct link_encoder_funcs *funcs;
+ int32_t aux_channel_offset;
+ struct dc_context *ctx;
+ struct graphics_object_id id;
+ struct graphics_object_id connector;
+ uint32_t output_signals;
+ enum engine_id preferred_engine;
+ struct encoder_feature_support features;
+ enum transmitter transmitter;
+ enum hpd_source_id hpd_source;
+};
+
+struct link_encoder_funcs {
+ bool (*validate_output_with_stream)(
+ struct link_encoder *enc, const struct dc_stream_state *stream);
+ void (*hw_init)(struct link_encoder *enc);
+ void (*setup)(struct link_encoder *enc,
+ enum signal_type signal);
+ void (*enable_tmds_output)(struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock);
+ void (*enable_dp_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+ void (*enable_dp_mst_output)(struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source);
+ void (*disable_output)(struct link_encoder *link_enc,
+ enum signal_type signal, struct dc_link *link);
+ void (*dp_set_lane_settings)(struct link_encoder *enc,
+ const struct link_training_settings *link_settings);
+ void (*dp_set_phy_pattern)(struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *para);
+ void (*update_mst_stream_allocation_table)(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+ void (*psr_program_dp_dphy_fast_training)(struct link_encoder *enc,
+ bool exit_link_training_required);
+ void (*psr_program_secondary_packet)(struct link_encoder *enc,
+ unsigned int sdp_transmit_line_num_deadline);
+ void (*connect_dig_be_to_fe)(struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect);
+ void (*enable_hpd)(struct link_encoder *enc);
+ void (*disable_hpd)(struct link_encoder *enc);
+ void (*destroy)(struct link_encoder **enc);
+};
+
+#endif /* LINK_ENCODER_H_ */
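For illustration (not in the patch), capability checks are expected to go through the packed encoder_feature_support flags; the helper name is made up:

#include "link_encoder.h"

/* hypothetical helper: query capability bits before picking link settings */
static bool encoder_supports_hbr2_tps3(const struct link_encoder *enc)
{
	/* the whole capability word is also reachable via flags.raw */
	return enc->features.flags.bits.IS_HBR2_CAPABLE &&
	       enc->features.flags.bits.IS_TPS3_CAPABLE;
}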
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
new file mode 100644
index 000000000000..3e1e7e6a8792
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_MEM_INPUT_H__
+#define __DAL_MEM_INPUT_H__
+
+#include "dc.h"
+#include "include/grph_object_id.h"
+
+#include "dml/display_mode_structs.h"
+
+struct dchub_init_data;
+struct cstate_pstate_watermarks_st {
+ uint32_t cstate_exit_ns;
+ uint32_t cstate_enter_plus_exit_ns;
+ uint32_t pstate_change_ns;
+};
+
+struct dcn_watermarks {
+ uint32_t pte_meta_urgent_ns;
+ uint32_t urgent_ns;
+ struct cstate_pstate_watermarks_st cstate_pstate;
+};
+
+struct dcn_watermark_set {
+ struct dcn_watermarks a;
+ struct dcn_watermarks b;
+ struct dcn_watermarks c;
+ struct dcn_watermarks d;
+};
+
+struct dce_watermarks {
+ int a_mark;
+ int b_mark;
+ int c_mark;
+ int d_mark;
+};
+
+struct stutter_modes {
+ bool enhanced;
+ bool quad_dmif_buffer;
+ bool watermark_nb_pstate;
+};
+
+struct mem_input {
+ struct mem_input_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_plane_address request_address;
+ struct dc_plane_address current_address;
+ int inst;
+ struct stutter_modes stutter_mode;
+};
+
+struct vm_system_aperture_param {
+ PHYSICAL_ADDRESS_LOC sys_default;
+ PHYSICAL_ADDRESS_LOC sys_low;
+ PHYSICAL_ADDRESS_LOC sys_high;
+};
+
+struct vm_context0_param {
+ PHYSICAL_ADDRESS_LOC pte_base;
+ PHYSICAL_ADDRESS_LOC pte_start;
+ PHYSICAL_ADDRESS_LOC pte_end;
+ PHYSICAL_ADDRESS_LOC fault_default;
+};
+
+struct mem_input_funcs {
+ void (*mem_input_setup)(
+ struct mem_input *mem_input,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+ void (*dcc_control)(struct mem_input *mem_input, bool enable,
+ bool independent_64b_blks);
+ void (*mem_program_viewport)(
+ struct mem_input *mem_input,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+ void (*mem_input_program_display_marks)(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns);
+
+ void (*mem_input_program_chroma_display_marks)(
+ struct mem_input *mem_input,
+ struct dce_watermarks nbp,
+ struct dce_watermarks stutter,
+ struct dce_watermarks urgent,
+ uint32_t total_dest_line_time_ns);
+
+ void (*allocate_mem_input)(
+ struct mem_input *mem_input,
+ uint32_t h_total,/* for current target */
+ uint32_t v_total,/* for current target */
+ uint32_t pix_clk_khz,/* for current target */
+ uint32_t total_streams_num);
+
+ void (*free_mem_input)(
+ struct mem_input *mem_input,
+ uint32_t paths_num);
+
+ bool (*mem_input_program_surface_flip_and_addr)(
+ struct mem_input *mem_input,
+ const struct dc_plane_address *address,
+ bool flip_immediate);
+
+ void (*mem_input_program_pte_vm)(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ enum dc_rotation_angle rotation);
+
+ void (*mem_input_set_vm_system_aperture_settings)(
+ struct mem_input *mem_input,
+ struct vm_system_aperture_param *apt);
+
+ void (*mem_input_set_vm_context0_settings)(
+ struct mem_input *mem_input,
+ const struct vm_context0_param *vm0);
+
+ void (*mem_input_program_surface_config)(
+ struct mem_input *mem_input,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror);
+
+ bool (*mem_input_is_flip_pending)(struct mem_input *mem_input);
+
+ void (*mem_input_update_dchub)(struct mem_input *mem_input,
+ struct dchub_init_data *dh_data);
+
+ void (*set_blank)(struct mem_input *mi, bool blank);
+ void (*set_hubp_blank_en)(struct mem_input *mi, bool blank);
+
+ void (*set_cursor_attributes)(
+ struct mem_input *mem_input,
+ const struct dc_cursor_attributes *attr);
+
+ void (*set_cursor_position)(
+ struct mem_input *mem_input,
+ const struct dc_cursor_position *pos,
+ const struct dc_cursor_mi_param *param);
+
+};
+
+#endif
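A hedged usage sketch, not part of the patch: the watermark hooks take dce_watermarks sets by value, one mark per power state. The helper and the numeric marks below are placeholders; real values come from the bandwidth calculations.

#include "mem_input.h"

/* hypothetical helper: program one set of display watermarks */
static void example_program_marks(struct mem_input *mi,
		uint32_t total_dest_line_time_ns)
{
	struct dce_watermarks nbp = {
		.a_mark = 100, .b_mark = 100, .c_mark = 100, .d_mark = 100 };
	struct dce_watermarks stutter = {
		.a_mark = 200, .b_mark = 200, .c_mark = 200, .d_mark = 200 };
	struct dce_watermarks urgent = {
		.a_mark = 300, .b_mark = 300, .c_mark = 300, .d_mark = 300 };

	mi->funcs->mem_input_program_display_marks(mi, nbp, stutter, urgent,
			total_dest_line_time_ns);
}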
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
new file mode 100644
index 000000000000..d4188b2c0626
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -0,0 +1,61 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_H__
+#define __DC_MPCC_H__
+
+#include "dc_hw_types.h"
+#include "opp.h"
+
+struct mpcc_cfg {
+ int dpp_id;
+ int opp_id;
+ struct mpc_tree_cfg *tree_cfg;
+ unsigned int z_index;
+
+ struct tg_color black_color;
+ bool per_pixel_alpha;
+ bool pre_multiplied_alpha;
+};
+
+struct mpc {
+ const struct mpc_funcs *funcs;
+ struct dc_context *ctx;
+};
+
+struct mpc_funcs {
+ int (*add)(struct mpc *mpc, struct mpcc_cfg *cfg);
+
+ void (*remove)(struct mpc *mpc,
+ struct mpc_tree_cfg *tree_cfg,
+ int opp_id,
+ int mpcc_inst);
+
+ void (*wait_for_idle)(struct mpc *mpc, int id);
+
+ void (*update_blend_mode)(struct mpc *mpc, struct mpcc_cfg *cfg);
+
+};
+
+#endif
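Illustrative only (not in the patch): adding a plane to the MPC blend tree goes through mpcc_cfg. Treating the return value of add() as the MPCC instance id is an assumption made for the example; the field choices are likewise placeholders.

#include "mpc.h"

/* hypothetical helper: insert one DPP output into the blend tree */
static int example_blend_plane(struct mpc *mpc, struct mpc_tree_cfg *tree,
		int dpp_id, int opp_id, unsigned int z_index)
{
	struct mpcc_cfg cfg = {
		.dpp_id = dpp_id,
		.opp_id = opp_id,
		.tree_cfg = tree,
		.z_index = z_index,
		.per_pixel_alpha = false,
		.pre_multiplied_alpha = false,
		/* black_color left zeroed for the example */
	};

	return mpc->funcs->add(mpc, &cfg);
}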
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
new file mode 100644
index 000000000000..75adb8fec551
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_OPP_H__
+#define __DAL_OPP_H__
+
+#include "hw_shared.h"
+#include "dc_hw_types.h"
+#include "transform.h"
+
+struct fixed31_32;
+
+/* TODO: Need cleanup */
+enum clamping_range {
+ CLAMPING_FULL_RANGE = 0, /* No Clamping */
+ CLAMPING_LIMITED_RANGE_8BPC, /* 8 bpc: Clamping 1 to FE */
+ CLAMPING_LIMITED_RANGE_10BPC, /* 10 bpc: Clamping 4 to 3FB */
+ CLAMPING_LIMITED_RANGE_12BPC, /* 12 bpc: Clamping 10 to FEF */
+	/* Use programmable clamping value on FMT_CLAMP_COMPONENT_R/G/B. */
+ CLAMPING_LIMITED_RANGE_PROGRAMMABLE
+};
+
+struct clamping_and_pixel_encoding_params {
+ enum dc_pixel_encoding pixel_encoding; /* Pixel Encoding */
+ enum clamping_range clamping_level; /* Clamping identifier */
+ enum dc_color_depth c_depth; /* Deep color use. */
+};
+
+struct bit_depth_reduction_params {
+ struct {
+		/* truncate/round */
+		/* trunc/round enabled */
+		uint32_t TRUNCATE_ENABLED:1;
+		/* 2 bits: 0 = 6 bpc, 1 = 8 bpc, 2 = 10 bpc */
+		uint32_t TRUNCATE_DEPTH:2;
+		/* truncate or round */
+		uint32_t TRUNCATE_MODE:1;
+
+		/* spatial dither */
+		/* Spatial Bit Depth Reduction enabled */
+		uint32_t SPATIAL_DITHER_ENABLED:1;
+		/* 2 bits: 0 = 6 bpc, 1 = 8 bpc, 2 = 10 bpc */
+		uint32_t SPATIAL_DITHER_DEPTH:2;
+		/* 0-3 to select patterns */
+		uint32_t SPATIAL_DITHER_MODE:2;
+		/* Enable RGB random dithering */
+		uint32_t RGB_RANDOM:1;
+		/* Enable Frame random dithering */
+		uint32_t FRAME_RANDOM:1;
+		/* Enable HighPass random dithering */
+		uint32_t HIGHPASS_RANDOM:1;
+
+		/* temporal dither */
+		/* frame modulation enabled */
+		uint32_t FRAME_MODULATION_ENABLED:1;
+		/* same as for trunc/spatial */
+		uint32_t FRAME_MODULATION_DEPTH:2;
+		/* 2/4 gray levels */
+ uint32_t TEMPORAL_LEVEL:1;
+ uint32_t FRC25:2;
+ uint32_t FRC50:2;
+ uint32_t FRC75:2;
+ } flags;
+
+ uint32_t r_seed_value;
+ uint32_t b_seed_value;
+ uint32_t g_seed_value;
+ enum dc_pixel_encoding pixel_encoding;
+};
+
+enum wide_gamut_regamma_mode {
+ /* 0x0 - BITS2:0 Bypass */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_BYPASS,
+ /* 0x1 - Fixed curve sRGB 2.4 */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_SRGB24,
+ /* 0x2 - Fixed curve xvYCC 2.22 */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_XYYCC22,
+ /* 0x3 - Programmable control A */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_MATRIX_A,
+ /* 0x4 - Programmable control B */
+ WIDE_GAMUT_REGAMMA_MODE_GRAPHICS_MATRIX_B,
+ /* 0x0 - BITS6:4 Bypass */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_BYPASS,
+ /* 0x1 - Fixed curve sRGB 2.4 */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_SRGB24,
+ /* 0x2 - Fixed curve xvYCC 2.22 */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_XYYCC22,
+ /* 0x3 - Programmable control A */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_MATRIX_A,
+ /* 0x4 - Programmable control B */
+ WIDE_GAMUT_REGAMMA_MODE_OVL_MATRIX_B
+};
+
+struct gamma_pixel {
+ struct fixed31_32 r;
+ struct fixed31_32 g;
+ struct fixed31_32 b;
+};
+
+enum channel_name {
+ CHANNEL_NAME_RED,
+ CHANNEL_NAME_GREEN,
+ CHANNEL_NAME_BLUE
+};
+
+struct custom_float_format {
+ uint32_t mantissa_bits;
+ uint32_t exponenta_bits;
+ bool sign;
+};
+
+struct custom_float_value {
+ uint32_t mantissa;
+ uint32_t exponenta;
+ uint32_t value;
+ bool negative;
+};
+
+struct hw_x_point {
+ uint32_t custom_float_x;
+ struct fixed31_32 x;
+ struct fixed31_32 regamma_y_red;
+ struct fixed31_32 regamma_y_green;
+ struct fixed31_32 regamma_y_blue;
+
+};
+
+struct pwl_float_data_ex {
+ struct fixed31_32 r;
+ struct fixed31_32 g;
+ struct fixed31_32 b;
+ struct fixed31_32 delta_r;
+ struct fixed31_32 delta_g;
+ struct fixed31_32 delta_b;
+};
+
+enum hw_point_position {
+ /* hw point sits between left and right sw points */
+ HW_POINT_POSITION_MIDDLE,
+	/* hw point lies to the left of the left (smaller) sw point */
+	HW_POINT_POSITION_LEFT,
+	/* hw point lies to the right of the right (bigger) sw point */
+ HW_POINT_POSITION_RIGHT
+};
+
+struct gamma_point {
+ int32_t left_index;
+ int32_t right_index;
+ enum hw_point_position pos;
+ struct fixed31_32 coeff;
+};
+
+struct pixel_gamma_point {
+ struct gamma_point r;
+ struct gamma_point g;
+ struct gamma_point b;
+};
+
+struct gamma_coefficients {
+ struct fixed31_32 a0[3];
+ struct fixed31_32 a1[3];
+ struct fixed31_32 a2[3];
+ struct fixed31_32 a3[3];
+ struct fixed31_32 user_gamma[3];
+ struct fixed31_32 user_contrast;
+ struct fixed31_32 user_brightness;
+};
+
+struct pwl_float_data {
+ struct fixed31_32 r;
+ struct fixed31_32 g;
+ struct fixed31_32 b;
+};
+
+struct mpc_tree_cfg {
+ int num_pipes;
+ int dpp[MAX_PIPES];
+ int mpcc[MAX_PIPES];
+};
+
+struct output_pixel_processor {
+ struct dc_context *ctx;
+ uint32_t inst;
+ struct pwl_params regamma_params;
+ struct mpc_tree_cfg mpc_tree;
+ bool mpcc_disconnect_pending[MAX_PIPES];
+ const struct opp_funcs *funcs;
+};
+
+enum fmt_stereo_action {
+ FMT_STEREO_ACTION_ENABLE = 0,
+ FMT_STEREO_ACTION_DISABLE,
+ FMT_STEREO_ACTION_UPDATE_POLARITY
+};
+
+struct opp_grph_csc_adjustment {
+ //enum grph_color_adjust_option color_adjust_option;
+ enum dc_color_space c_space;
+ enum dc_color_depth color_depth; /* clean up to uint32_t */
+ enum graphics_csc_adjust_type csc_adjust_type;
+ int32_t adjust_divider;
+ int32_t grph_cont;
+ int32_t grph_sat;
+ int32_t grph_bright;
+ int32_t grph_hue;
+};
+
+/* Underlay related types */
+
+struct hw_adjustment_range {
+ int32_t hw_default;
+ int32_t min;
+ int32_t max;
+ int32_t step;
+ uint32_t divider; /* (actually HW range is min/divider; divider !=0) */
+};
+
+enum ovl_csc_adjust_item {
+ OVERLAY_BRIGHTNESS = 0,
+ OVERLAY_GAMMA,
+ OVERLAY_CONTRAST,
+ OVERLAY_SATURATION,
+ OVERLAY_HUE,
+ OVERLAY_ALPHA,
+ OVERLAY_ALPHA_PER_PIX,
+ OVERLAY_COLOR_TEMPERATURE
+};
+
+struct opp_funcs {
+
+
+ /* FORMATTER RELATED */
+
+ void (*opp_program_fmt)(
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+ void (*opp_set_dyn_expansion)(
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth,
+ enum signal_type signal);
+
+ void (*opp_program_bit_depth_reduction)(
+ struct output_pixel_processor *opp,
+ const struct bit_depth_reduction_params *params);
+
+ /* underlay related */
+ void (*opp_get_underlay_adjustment_range)(
+ struct output_pixel_processor *opp,
+ enum ovl_csc_adjust_item overlay_adjust_item,
+ struct hw_adjustment_range *range);
+
+ void (*opp_destroy)(struct output_pixel_processor **opp);
+
+ void (*opp_set_stereo_polarity)(
+ struct output_pixel_processor *opp,
+ bool enable,
+ bool rightEyePolarity);
+
+ void (*opp_set_test_pattern)(
+ struct output_pixel_processor *opp,
+ bool enable);
+};
+
+#endif
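A minimal sketch, not part of the patch, of programming the OPP formatter for an 8 bpc RGB output with spatial dithering; the PIXEL_ENCODING_RGB and COLOR_DEPTH_888 constants are assumed to be the usual dc_hw_types.h names, and the dither settings are example values only.

#include "opp.h"

/* hypothetical helper: 8 bpc RGB output with spatial dithering */
static void example_program_fmt_8bpc(struct output_pixel_processor *opp)
{
	struct bit_depth_reduction_params fmt = {0};
	struct clamping_and_pixel_encoding_params clamping = {0};

	fmt.flags.SPATIAL_DITHER_ENABLED = 1;
	fmt.flags.SPATIAL_DITHER_DEPTH = 1;	/* 1 = 8 bpc */
	fmt.flags.RGB_RANDOM = 1;
	fmt.pixel_encoding = PIXEL_ENCODING_RGB;	/* assumed dc_hw_types.h name */

	clamping.pixel_encoding = PIXEL_ENCODING_RGB;
	clamping.clamping_level = CLAMPING_FULL_RANGE;
	clamping.c_depth = COLOR_DEPTH_888;		/* assumed dc_hw_types.h name */

	opp->funcs->opp_program_fmt(opp, &fmt, &clamping);
}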
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
new file mode 100644
index 000000000000..3050afe8e8a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -0,0 +1,130 @@
+/*
+ * stream_encoder.h
+ *
+ */
+
+#ifndef STREAM_ENCODER_H_
+#define STREAM_ENCODER_H_
+
+#include "audio_types.h"
+
+struct dc_bios;
+struct dc_context;
+struct dc_crtc_timing;
+
+struct encoder_info_packet {
+ bool valid;
+ uint8_t hb0;
+ uint8_t hb1;
+ uint8_t hb2;
+ uint8_t hb3;
+ uint8_t sb[32];
+};
+
+struct encoder_info_frame {
+ /* auxiliary video information */
+ struct encoder_info_packet avi;
+ struct encoder_info_packet gamut;
+ struct encoder_info_packet vendor;
+ /* source product description */
+ struct encoder_info_packet spd;
+ /* video stream configuration */
+ struct encoder_info_packet vsc;
+ /* HDR Static MetaData */
+ struct encoder_info_packet hdrsmd;
+};
+
+struct encoder_unblank_param {
+ struct dc_link_settings link_settings;
+ unsigned int pixel_clk_khz;
+};
+
+struct encoder_set_dp_phy_pattern_param {
+ enum dp_test_pattern dp_phy_pattern;
+ const uint8_t *custom_pattern;
+ uint32_t custom_pattern_size;
+ enum dp_panel_mode dp_panel_mode;
+};
+
+struct stream_encoder {
+ const struct stream_encoder_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_bios *bp;
+ enum engine_id id;
+};
+
+struct stream_encoder_funcs {
+ void (*dp_set_stream_attribute)(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space);
+
+ void (*hdmi_set_stream_attribute)(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio);
+
+ void (*dvi_set_stream_attribute)(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link);
+
+ void (*set_mst_bandwidth)(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp);
+
+ void (*update_hdmi_info_packets)(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame);
+
+ void (*stop_hdmi_info_packets)(
+ struct stream_encoder *enc);
+
+ void (*update_dp_info_packets)(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame);
+
+ void (*stop_dp_info_packets)(
+ struct stream_encoder *enc);
+
+ void (*dp_blank)(
+ struct stream_encoder *enc);
+
+ void (*dp_unblank)(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param);
+
+ void (*audio_mute_control)(
+ struct stream_encoder *enc, bool mute);
+
+ void (*dp_audio_setup)(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info);
+
+ void (*dp_audio_enable) (
+ struct stream_encoder *enc);
+
+ void (*dp_audio_disable) (
+ struct stream_encoder *enc);
+
+ void (*hdmi_audio_setup)(
+ struct stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info,
+ struct audio_crtc_info *audio_crtc_info);
+
+ void (*hdmi_audio_disable) (
+ struct stream_encoder *enc);
+
+ void (*setup_stereo_sync) (
+ struct stream_encoder *enc,
+ int tg_inst,
+ bool enable);
+
+ void (*set_avmute)(
+ struct stream_encoder *enc, bool enable);
+};
+
+#endif /* STREAM_ENCODER_H_ */
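For illustration only (not in the patch), encoder_info_packet carries raw InfoFrame header/payload bytes; the AVI header values below (type 0x82, version 2, length 13) follow CTA-861, and the rest of the payload is intentionally left out.

#include "stream_encoder.h"

/* hypothetical helper: push a minimal AVI InfoFrame header */
static void example_send_avi(struct stream_encoder *enc)
{
	struct encoder_info_frame frame = {0};

	frame.avi.valid = true;
	frame.avi.hb0 = 0x82;	/* packet type: AVI InfoFrame */
	frame.avi.hb1 = 0x02;	/* version */
	frame.avi.hb2 = 0x0d;	/* payload length */
	/* frame.avi.sb[] would carry the checksum and data bytes */

	enc->funcs->update_hdmi_info_packets(enc, &frame);
}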
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
new file mode 100644
index 000000000000..c6ab38c5b2be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TIMING_GENERATOR_TYPES_H__
+#define __DAL_TIMING_GENERATOR_TYPES_H__
+
+struct dc_bios;
+
+/* Contains CRTC vertical/horizontal pixel counters */
+struct crtc_position {
+ int32_t vertical_count;
+ int32_t horizontal_count;
+ int32_t nominal_vcount;
+};
+
+struct dcp_gsl_params {
+ int gsl_group;
+ int gsl_master;
+};
+
+/* Defines the parameters of Dynamic Refresh Rate (DRR) mode */
+struct drr_params {
+ uint32_t vertical_total_min;
+ uint32_t vertical_total_max;
+ bool immediate_flip;
+};
+
+#define LEFT_EYE_3D_PRIMARY_SURFACE 1
+#define RIGHT_EYE_3D_PRIMARY_SURFACE 0
+
+enum test_pattern_dyn_range {
+ TEST_PATTERN_DYN_RANGE_VESA = 0,
+ TEST_PATTERN_DYN_RANGE_CEA
+};
+
+enum test_pattern_mode {
+ TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
+ TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
+ TEST_PATTERN_MODE_VERTICALBARS,
+ TEST_PATTERN_MODE_HORIZONTALBARS,
+ TEST_PATTERN_MODE_SINGLERAMP_RGB,
+ TEST_PATTERN_MODE_DUALRAMP_RGB
+};
+
+enum test_pattern_color_format {
+ TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
+ TEST_PATTERN_COLOR_FORMAT_BPC_8,
+ TEST_PATTERN_COLOR_FORMAT_BPC_10,
+ TEST_PATTERN_COLOR_FORMAT_BPC_12
+};
+
+enum controller_dp_test_pattern {
+ CONTROLLER_DP_TEST_PATTERN_D102 = 0,
+ CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
+ CONTROLLER_DP_TEST_PATTERN_PRBS7,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
+ CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA
+};
+
+enum crtc_state {
+ CRTC_STATE_VBLANK = 0,
+ CRTC_STATE_VACTIVE
+};
+
+struct _dlg_otg_param {
+ int vstartup_start;
+ int vupdate_offset;
+ int vupdate_width;
+ int vready_offset;
+ enum signal_type signal;
+};
+
+struct crtc_stereo_flags {
+ uint8_t PROGRAM_STEREO : 1;
+ uint8_t PROGRAM_POLARITY : 1;
+ uint8_t RIGHT_EYE_POLARITY : 1;
+ uint8_t FRAME_PACKED : 1;
+ uint8_t DISABLE_STEREO_DP_SYNC : 1;
+};
+
+struct timing_generator {
+ const struct timing_generator_funcs *funcs;
+ struct dc_bios *bp;
+ struct dc_context *ctx;
+ struct _dlg_otg_param dlg_otg_param;
+ int inst;
+};
+
+struct dc_crtc_timing;
+
+struct drr_params;
+
+struct timing_generator_funcs {
+ bool (*validate_timing)(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing);
+ void (*program_timing)(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing,
+ bool use_vbios);
+ bool (*enable_crtc)(struct timing_generator *tg);
+ bool (*disable_crtc)(struct timing_generator *tg);
+ bool (*is_counter_moving)(struct timing_generator *tg);
+ void (*get_position)(struct timing_generator *tg,
+ struct crtc_position *position);
+
+ uint32_t (*get_frame_count)(struct timing_generator *tg);
+ void (*get_scanoutpos)(
+ struct timing_generator *tg,
+ uint32_t *v_blank_start,
+ uint32_t *v_blank_end,
+ uint32_t *h_position,
+ uint32_t *v_position);
+ void (*set_early_control)(struct timing_generator *tg,
+ uint32_t early_cntl);
+ void (*wait_for_state)(struct timing_generator *tg,
+ enum crtc_state state);
+ void (*set_blank)(struct timing_generator *tg,
+ bool enable_blanking);
+ bool (*is_blanked)(struct timing_generator *tg);
+ void (*set_overscan_blank_color) (struct timing_generator *tg, const struct tg_color *color);
+ void (*set_blank_color)(struct timing_generator *tg, const struct tg_color *color);
+ void (*set_colors)(struct timing_generator *tg,
+ const struct tg_color *blank_color,
+ const struct tg_color *overscan_color);
+
+ void (*disable_vga)(struct timing_generator *tg);
+ bool (*did_triggered_reset_occur)(struct timing_generator *tg);
+ void (*setup_global_swap_lock)(struct timing_generator *tg,
+ const struct dcp_gsl_params *gsl_params);
+ void (*unlock)(struct timing_generator *tg);
+ void (*lock)(struct timing_generator *tg);
+ void (*enable_reset_trigger)(struct timing_generator *tg, int source_tg_inst);
+ void (*disable_reset_trigger)(struct timing_generator *tg);
+ void (*tear_down_global_swap_lock)(struct timing_generator *tg);
+ void (*enable_advanced_request)(struct timing_generator *tg,
+ bool enable, const struct dc_crtc_timing *timing);
+ void (*set_drr)(struct timing_generator *tg, const struct drr_params *params);
+ void (*set_static_screen_control)(struct timing_generator *tg,
+ uint32_t value);
+ void (*set_test_pattern)(
+ struct timing_generator *tg,
+ enum controller_dp_test_pattern test_pattern,
+ enum dc_color_depth color_depth);
+
+ bool (*arm_vert_intr)(struct timing_generator *tg, uint8_t width);
+
+ void (*program_global_sync)(struct timing_generator *tg);
+ void (*enable_optc_clock)(struct timing_generator *tg, bool enable);
+ void (*program_stereo)(struct timing_generator *tg,
+ const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
+ bool (*is_stereo_left_eye)(struct timing_generator *tg);
+};
+
+#endif
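A usage sketch, not part of the patch: the timing generator hooks are typically combined, for example waiting for vertical blank before sampling the scanout position. The helper name is hypothetical.

#include "timing_generator.h"

/* hypothetical helper: sample the vertical position once in vblank */
static uint32_t example_sample_vpos_in_vblank(struct timing_generator *tg)
{
	uint32_t v_blank_start, v_blank_end, h_pos, v_pos;

	tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
	tg->funcs->get_scanoutpos(tg, &v_blank_start, &v_blank_end,
			&h_pos, &v_pos);

	return v_pos;
}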
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
new file mode 100644
index 000000000000..ea88997e1bbd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TRANSFORM_H__
+#define __DAL_TRANSFORM_H__
+
+#include "hw_shared.h"
+#include "dc_hw_types.h"
+#include "fixed31_32.h"
+
+#define CSC_TEMPERATURE_MATRIX_SIZE 9
+
+struct bit_depth_reduction_params;
+
+struct transform {
+ const struct transform_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+ struct dpp_caps *caps;
+ struct pwl_params regamma_params;
+};
+
+/* Colorimetry */
+enum colorimetry {
+ COLORIMETRY_NO_DATA = 0,
+ COLORIMETRY_ITU601 = 1,
+ COLORIMETRY_ITU709 = 2,
+ COLORIMETRY_EXTENDED = 3
+};
+
+enum colorimetry_ext {
+ COLORIMETRYEX_XVYCC601 = 0,
+ COLORIMETRYEX_XVYCC709 = 1,
+ COLORIMETRYEX_SYCC601 = 2,
+ COLORIMETRYEX_ADOBEYCC601 = 3,
+ COLORIMETRYEX_ADOBERGB = 4,
+ COLORIMETRYEX_BT2020YCC = 5,
+ COLORIMETRYEX_BT2020RGBYCBCR = 6,
+ COLORIMETRYEX_RESERVED = 7
+};
+
+enum active_format_info {
+ ACTIVE_FORMAT_NO_DATA = 0,
+ ACTIVE_FORMAT_VALID = 1
+};
+
+/* Active format aspect ratio */
+enum active_format_aspect_ratio {
+ ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE = 8,
+ ACTIVE_FORMAT_ASPECT_RATIO_4_3 = 9,
+ ACTIVE_FORMAT_ASPECT_RATIO_16_9 = 0XA,
+ ACTIVE_FORMAT_ASPECT_RATIO_14_9 = 0XB
+};
+
+enum bar_info {
+ BAR_INFO_NOT_VALID = 0,
+ BAR_INFO_VERTICAL_VALID = 1,
+ BAR_INFO_HORIZONTAL_VALID = 2,
+ BAR_INFO_BOTH_VALID = 3
+};
+
+enum picture_scaling {
+ PICTURE_SCALING_UNIFORM = 0,
+ PICTURE_SCALING_HORIZONTAL = 1,
+ PICTURE_SCALING_VERTICAL = 2,
+ PICTURE_SCALING_BOTH = 3
+};
+
+/* RGB quantization range */
+enum rgb_quantization_range {
+ RGB_QUANTIZATION_DEFAULT_RANGE = 0,
+ RGB_QUANTIZATION_LIMITED_RANGE = 1,
+ RGB_QUANTIZATION_FULL_RANGE = 2,
+ RGB_QUANTIZATION_RESERVED = 3
+};
+
+/* YYC quantization range */
+enum yyc_quantization_range {
+ YYC_QUANTIZATION_LIMITED_RANGE = 0,
+ YYC_QUANTIZATION_FULL_RANGE = 1,
+ YYC_QUANTIZATION_RESERVED2 = 2,
+ YYC_QUANTIZATION_RESERVED3 = 3
+};
+
+enum graphics_gamut_adjust_type {
+ GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS = 0,
+ GRAPHICS_GAMUT_ADJUST_TYPE_HW, /* without adjustments */
+ GRAPHICS_GAMUT_ADJUST_TYPE_SW /* use adjustments */
+};
+
+enum lb_memory_config {
+ /* Enable all 3 pieces of memory */
+ LB_MEMORY_CONFIG_0 = 0,
+
+ /* Enable only the first piece of memory */
+ LB_MEMORY_CONFIG_1 = 1,
+
+ /* Enable only the second piece of memory */
+ LB_MEMORY_CONFIG_2 = 2,
+
+	/* Only applicable in 4:2:0 mode: enable all 3 pieces of memory, with
+	 * the last piece of chroma memory used for luma storage
+ */
+ LB_MEMORY_CONFIG_3 = 3
+};
+
+struct xfm_grph_csc_adjustment {
+ struct fixed31_32 temperature_matrix[CSC_TEMPERATURE_MATRIX_SIZE];
+ enum graphics_gamut_adjust_type gamut_adjust_type;
+};
+
+struct overscan_info {
+ int left;
+ int right;
+ int top;
+ int bottom;
+};
+
+struct scaling_ratios {
+ struct fixed31_32 horz;
+ struct fixed31_32 vert;
+ struct fixed31_32 horz_c;
+ struct fixed31_32 vert_c;
+};
+
+struct sharpness_adj {
+ int horz;
+ int vert;
+};
+
+struct line_buffer_params {
+ bool alpha_en;
+ bool pixel_expan_mode;
+ bool interleave_en;
+ int dynamic_pixel_depth;
+ enum lb_pixel_depth depth;
+};
+
+struct scl_inits {
+ struct fixed31_32 h;
+ struct fixed31_32 h_c;
+ struct fixed31_32 v;
+ struct fixed31_32 v_bot;
+ struct fixed31_32 v_c;
+ struct fixed31_32 v_c_bot;
+};
+
+struct scaler_data {
+ int h_active;
+ int v_active;
+ struct scaling_taps taps;
+ struct rect viewport;
+ struct rect viewport_c;
+ struct rect recout;
+ struct scaling_ratios ratios;
+ struct scl_inits inits;
+ struct sharpness_adj sharpness;
+ enum pixel_format format;
+ struct line_buffer_params lb_params;
+};
+
+struct transform_funcs {
+ void (*transform_reset)(struct transform *xfm);
+
+ void (*transform_set_scaler)(struct transform *xfm,
+ const struct scaler_data *scl_data);
+
+ void (*transform_set_pixel_storage_depth)(
+ struct transform *xfm,
+ enum lb_pixel_depth depth,
+ const struct bit_depth_reduction_params *bit_depth_params);
+
+ bool (*transform_get_optimal_number_of_taps)(
+ struct transform *xfm,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps);
+
+ void (*transform_set_gamut_remap)(
+ struct transform *xfm,
+ const struct xfm_grph_csc_adjustment *adjust);
+
+ void (*opp_set_csc_default)(
+ struct transform *xfm,
+ const struct default_adjustment *default_adjust);
+
+ void (*opp_set_csc_adjustment)(
+ struct transform *xfm,
+ const struct out_csc_color_matrix *tbl_entry);
+
+ void (*opp_power_on_regamma_lut)(
+ struct transform *xfm,
+ bool power_on);
+
+ void (*opp_program_regamma_lut)(
+ struct transform *xfm,
+ const struct pwl_result_data *rgb,
+ uint32_t num);
+
+ void (*opp_configure_regamma_lut)(
+ struct transform *xfm,
+ bool is_ram_a);
+
+ void (*opp_program_regamma_lutb_settings)(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_luta_settings)(
+ struct transform *xfm,
+ const struct pwl_params *params);
+
+ void (*opp_program_regamma_pwl)(
+ struct transform *xfm, const struct pwl_params *params);
+
+ void (*opp_set_regamma_mode)(
+ struct transform *xfm_base,
+ enum opp_regamma mode);
+
+ void (*ipp_set_degamma)(
+ struct transform *xfm_base,
+ enum ipp_degamma_mode mode);
+
+ void (*ipp_program_input_lut)(
+ struct transform *xfm_base,
+ const struct dc_gamma *gamma);
+
+ void (*ipp_program_degamma_pwl)(struct transform *xfm_base,
+ const struct pwl_params *params);
+
+ void (*ipp_setup)(
+ struct transform *xfm_base,
+ enum surface_pixel_format input_format,
+ enum expansion_mode mode);
+
+ void (*ipp_full_bypass)(struct transform *xfm_base);
+
+ void (*set_cursor_attributes)(
+ struct transform *xfm_base,
+ const struct dc_cursor_attributes *attr);
+
+};
+
+const uint16_t *get_filter_2tap_16p(void);
+const uint16_t *get_filter_2tap_64p(void);
+const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio);
+const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio);
+const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio);
+const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio);
+
+
+/* Defines the pixel processing capability of the DSCL */
+enum dscl_data_processing_format {
+ DSCL_DATA_PRCESSING_FIXED_FORMAT, /* The DSCL processes pixel data in fixed format */
+ DSCL_DATA_PRCESSING_FLOAT_FORMAT, /* The DSCL processes pixel data in float format */
+};
+
+/*
+ * The DPP capabilities structure contains enumerations that specify the
+ * HW processing features and the associated function pointers that
+ * provide the function interface, which can be overloaded for
+ * implementations with different capabilities.
+ */
+struct dpp_caps {
+ /* DSCL processing pixel data in fixed or float format */
+ enum dscl_data_processing_format dscl_data_proc_format;
+
+ /* Calculates the number of partitions in the line buffer.
+ * The implementation of this function is overloaded for
+ * different versions of DSCL LB.
+ */
+ void (*dscl_calc_lb_num_partitions)(
+ const struct scaler_data *scl_data,
+ enum lb_memory_config lb_config,
+ int *num_part_y,
+ int *num_part_c);
+};
+
+
+#endif
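Illustrative sketch (not in the patch): the scaler path is expected to validate the tap counts before programming, which is why transform_get_optimal_number_of_taps precedes transform_set_scaler here; scl_data is assumed to arrive pre-filled from the resource/validation code, and the helper name is made up.

#include "transform.h"

/* hypothetical helper: validate taps, then program the scaler */
static bool example_setup_scaler(struct transform *xfm,
		struct scaler_data *scl_data,
		const struct scaling_taps *requested_taps)
{
	if (!xfm->funcs->transform_get_optimal_number_of_taps(xfm, scl_data,
			requested_taps))
		return false;

	xfm->funcs->transform_set_scaler(xfm, scl_data);
	return true;
}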
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
new file mode 100644
index 000000000000..8734689a9245
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HW_SEQUENCER_H__
+#define __DC_HW_SEQUENCER_H__
+#include "dc_types.h"
+#include "clock_source.h"
+#include "inc/hw/timing_generator.h"
+#include "inc/hw/link_encoder.h"
+#include "core_status.h"
+
+enum pipe_gating_control {
+ PIPE_GATING_CONTROL_DISABLE = 0,
+ PIPE_GATING_CONTROL_ENABLE,
+ PIPE_GATING_CONTROL_INIT
+};
+
+struct dce_hwseq_wa {
+ bool blnd_crtc_trigger;
+};
+
+struct dce_hwseq {
+ struct dc_context *ctx;
+ const struct dce_hwseq_registers *regs;
+ const struct dce_hwseq_shift *shifts;
+ const struct dce_hwseq_mask *masks;
+ struct dce_hwseq_wa wa;
+};
+
+struct pipe_ctx;
+struct dc_state;
+struct dchub_init_data;
+struct dc_static_screen_events;
+struct resource_pool;
+struct resource_context;
+
+struct hw_sequencer_funcs {
+
+ void (*init_hw)(struct dc *dc);
+
+ enum dc_status (*apply_ctx_to_hw)(
+ struct dc *dc, struct dc_state *context);
+
+ void (*reset_hw_ctx_wrap)(
+ struct dc *dc, struct dc_state *context);
+
+ void (*apply_ctx_for_surface)(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ int num_planes,
+ struct dc_state *context);
+
+ void (*set_plane_config)(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct resource_context *res_ctx);
+
+ void (*program_gamut_remap)(
+ struct pipe_ctx *pipe_ctx);
+
+ void (*program_csc_matrix)(
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix);
+
+ void (*update_plane_addr)(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+ void (*update_dchub)(
+ struct dce_hwseq *hws,
+ struct dchub_init_data *dh_data);
+
+ void (*update_pending_status)(
+ struct pipe_ctx *pipe_ctx);
+
+ bool (*set_input_transfer_func)(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state);
+
+ bool (*set_output_transfer_func)(
+ struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream);
+
+ void (*power_down)(struct dc *dc);
+
+ void (*enable_accelerated_mode)(struct dc *dc);
+
+ void (*enable_timing_synchronization)(
+ struct dc *dc,
+ int group_index,
+ int group_size,
+ struct pipe_ctx *grouped_pipes[]);
+
+ void (*enable_display_pipe_clock_gating)(
+ struct dc_context *ctx,
+ bool clock_gating);
+
+ bool (*enable_display_power_gating)(
+ struct dc *dc,
+ uint8_t controller_id,
+ struct dc_bios *dcb,
+ enum pipe_gating_control power_gating);
+
+ void (*power_down_front_end)(struct dc *dc, int fe_idx);
+
+ void (*power_on_front_end)(struct dc *dc,
+ struct pipe_ctx *pipe,
+ struct dc_state *context);
+
+ void (*update_info_frame)(struct pipe_ctx *pipe_ctx);
+
+ void (*enable_stream)(struct pipe_ctx *pipe_ctx);
+
+ void (*disable_stream)(struct pipe_ctx *pipe_ctx,
+ int option);
+
+ void (*unblank_stream)(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+
+ void (*pipe_control_lock)(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+
+ void (*set_bandwidth)(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
+ void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ int vmin, int vmax);
+
+ void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
+ struct crtc_position *position);
+
+ void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_events *events);
+
+ enum dc_status (*prog_pixclk_crtc_otg)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
+
+ void (*setup_stereo)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc *dc);
+
+ void (*set_avmute)(struct pipe_ctx *pipe_ctx, bool enable);
+
+ void (*log_hw_state)(struct dc *dc);
+
+ void (*wait_for_mpcc_disconnect)(struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx);
+
+ void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
+ void (*optimize_shared_resources)(struct dc *dc);
+ void (*edp_power_control)(
+ struct link_encoder *enc,
+ bool enable);
+ void (*edp_backlight_control)(
+ struct dc_link *link,
+ bool enable);
+};
+
+void color_space_to_black_color(
+ const struct dc *dc,
+ enum dc_color_space colorspace,
+ struct tg_color *black_color);
+
+bool hwss_wait_for_blank_complete(
+ struct timing_generator *tg);
+
+#endif /* __DC_HW_SEQUENCER_H__ */
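For illustration only (not part of the patch), a compressed view of how core code is expected to drive the sequencer table for a state commit; real callers split this across several paths, DC_OK comes from core_status.h, and the helper itself is hypothetical.

#include "hw_sequencer.h"

/* hypothetical helper: apply a new dc_state through the sequencer table */
static enum dc_status example_commit_state(struct dc *dc,
		const struct hw_sequencer_funcs *hwss,
		struct dc_state *context)
{
	enum dc_status status = hwss->apply_ctx_to_hw(dc, context);

	if (status != DC_OK)
		return status;

	/* per-pipe address flips would follow via hwss->update_plane_addr() */
	return DC_OK;
}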
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
new file mode 100644
index 000000000000..f2b8c9a376d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_HWSS_H__
+#define __DC_LINK_HWSS_H__
+
+#include "inc/core_status.h"
+
+enum dc_status core_link_read_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ uint8_t *data,
+ uint32_t size);
+
+enum dc_status core_link_write_dpcd(
+ struct dc_link *link,
+ uint32_t address,
+ const uint8_t *data,
+ uint32_t size);
+
+struct gpio *get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+
+void dp_enable_link_phy(
+ struct dc_link *link,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+
+void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+
+void dp_disable_link_phy(struct dc_link *link, enum signal_type signal);
+
+void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal);
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ enum hw_dp_training_pattern pattern);
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_settings);
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size);
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
+
+void dp_retrain_link_dp_test(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+#endif /* __DC_LINK_HWSS_H__ */
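A hedged sketch, not in the patch, of reading one DPCD byte through the core_link helpers; address 0x00000 is the DPCD revision field per the DisplayPort spec, and the helper name is made up.

#include "link_hwss.h"

/* hypothetical helper: read the DPCD revision byte over AUX */
static bool example_read_dpcd_rev(struct dc_link *link, uint8_t *rev)
{
	enum dc_status status = core_link_read_dpcd(link,
			0x00000 /* DPCD_REV */, rev, sizeof(*rev));

	return status == DC_OK;
}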
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
new file mode 100644
index 000000000000..77eb72874e90
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_
+
+#include "dm_services.h"
+
+/* macros for register read/write
+ * users of these macros need to define
+ *
+ * CTX ==> macro resolving to a pointer to the dc_context
+ *    eg. aud110->base.ctx
+ *
+ * REG ==> macro resolving to the location of the register offset
+ *    eg. aud110->regs->reg
+ */
+#define REG_READ(reg_name) \
+ dm_read_reg(CTX, REG(reg_name))
+
+#define REG_WRITE(reg_name, value) \
+ dm_write_reg(CTX, REG(reg_name), value)
+
+#ifdef REG_SET
+#undef REG_SET
+#endif
+
+#ifdef REG_GET
+#undef REG_GET
+#endif
+
+/* macro to set register fields. */
+#define REG_SET_N(reg_name, n, initial_val, ...) \
+ generic_reg_update_ex(CTX, \
+ REG(reg_name), \
+ initial_val, \
+ n, __VA_ARGS__)
+
+#define FN(reg_name, field) \
+ FD(reg_name##__##field)
+
+#define REG_SET(reg_name, initial_val, field, val) \
+ REG_SET_N(reg_name, 1, initial_val, \
+ FN(reg_name, field), val)
+
+#define REG_SET_2(reg, init_value, f1, v1, f2, v2) \
+ REG_SET_N(reg, 2, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+#define REG_SET_3(reg, init_value, f1, v1, f2, v2, f3, v3) \
+ REG_SET_N(reg, 3, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3)
+
+#define REG_SET_4(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_SET_N(reg, 4, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4)
+
+#define REG_SET_5(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5) \
+ REG_SET_N(reg, 5, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5)
+
+#define REG_SET_6(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5, f6, v6) \
+ REG_SET_N(reg, 6, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5,\
+ FN(reg, f6), v6)
+
+#define REG_SET_7(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5, f6, v6, f7, v7) \
+ REG_SET_N(reg, 7, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5,\
+ FN(reg, f6), v6,\
+ FN(reg, f7), v7)
+
+#define REG_SET_8(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, \
+ f5, v5, f6, v6, f7, v7, f8, v8) \
+ REG_SET_N(reg, 8, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2,\
+ FN(reg, f3), v3,\
+ FN(reg, f4), v4,\
+ FN(reg, f5), v5,\
+ FN(reg, f6), v6,\
+ FN(reg, f7), v7,\
+ FN(reg, f8), v8)
+
+#define REG_SET_9(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, f5, \
+ v5, f6, v6, f7, v7, f8, v8, f9, v9) \
+ REG_SET_N(reg, 9, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9)
+
+#define REG_SET_10(reg, init_value, f1, v1, f2, v2, f3, v3, f4, v4, f5, \
+ v5, f6, v6, f7, v7, f8, v8, f9, v9, f10, v10) \
+ REG_SET_N(reg, 10, init_value, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10)
+
+/* macro to get register fields:
+ * read the given register and fill in the field value in the output parameter
+ */
+#define REG_GET(reg_name, field, val) \
+ generic_reg_get(CTX, REG(reg_name), \
+ FN(reg_name, field), val)
+
+#define REG_GET_2(reg_name, f1, v1, f2, v2) \
+ generic_reg_get2(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2)
+
+#define REG_GET_3(reg_name, f1, v1, f2, v2, f3, v3) \
+ generic_reg_get3(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3)
+
+#define REG_GET_4(reg_name, f1, v1, f2, v2, f3, v3, f4, v4) \
+ generic_reg_get4(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3, \
+ FN(reg_name, f4), v4)
+
+#define REG_GET_5(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5) \
+ generic_reg_get5(CTX, REG(reg_name), \
+ FN(reg_name, f1), v1, \
+ FN(reg_name, f2), v2, \
+ FN(reg_name, f3), v3, \
+ FN(reg_name, f4), v4, \
+ FN(reg_name, f5), v5)
+
+/* macro to poll and wait for a register field to read back a given value */
+
+#define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try) \
+ generic_reg_wait(CTX, \
+ REG(reg_name), FN(reg_name, field), val,\
+ delay_between_poll_us, max_try, __func__, __LINE__)
+
+/* macro to update (read, modify, write) register fields
+ */
+#define REG_UPDATE_N(reg_name, n, ...) \
+ generic_reg_update_ex(CTX, \
+ REG(reg_name), \
+ REG_READ(reg_name), \
+ n, __VA_ARGS__)
+
+#define REG_UPDATE(reg_name, field, val) \
+ REG_UPDATE_N(reg_name, 1, \
+ FN(reg_name, field), val)
+
+#define REG_UPDATE_2(reg, f1, v1, f2, v2) \
+ REG_UPDATE_N(reg, 2,\
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2)
+
+#define REG_UPDATE_3(reg, f1, v1, f2, v2, f3, v3) \
+ REG_UPDATE_N(reg, 3, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3)
+
+#define REG_UPDATE_4(reg, f1, v1, f2, v2, f3, v3, f4, v4) \
+ REG_UPDATE_N(reg, 4, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4)
+
+#define REG_UPDATE_5(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5) \
+ REG_UPDATE_N(reg, 5, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5)
+
+#define REG_UPDATE_6(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6) \
+ REG_UPDATE_N(reg, 6, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6)
+
+#define REG_UPDATE_7(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7) \
+ REG_UPDATE_N(reg, 7, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7)
+
+#define REG_UPDATE_8(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8) \
+ REG_UPDATE_N(reg, 8, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8)
+
+#define REG_UPDATE_9(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9) \
+ REG_UPDATE_N(reg, 9, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9)
+
+#define REG_UPDATE_10(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10, v10)\
+ REG_UPDATE_N(reg, 10, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10)
+
+#define REG_UPDATE_14(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10,\
+ v10, f11, v11, f12, v12, f13, v13, f14, v14)\
+ REG_UPDATE_N(reg, 14, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10, \
+ FN(reg, f11), v11, \
+ FN(reg, f12), v12, \
+ FN(reg, f13), v13, \
+ FN(reg, f14), v14)
+
+#define REG_UPDATE_19(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10,\
+ v10, f11, v11, f12, v12, f13, v13, f14, v14, f15, v15, f16, v16, f17, v17, f18, v18, f19, v19)\
+ REG_UPDATE_N(reg, 19, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10, \
+ FN(reg, f11), v11, \
+ FN(reg, f12), v12, \
+ FN(reg, f13), v13, \
+ FN(reg, f14), v14, \
+ FN(reg, f15), v15, \
+ FN(reg, f16), v16, \
+ FN(reg, f17), v17, \
+ FN(reg, f18), v18, \
+ FN(reg, f19), v19)
+
+#define REG_UPDATE_20(reg, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8, f9, v9, f10,\
+ v10, f11, v11, f12, v12, f13, v13, f14, v14, f15, v15, f16, v16, f17, v17, f18, v18, f19, v19, f20, v20)\
+ REG_UPDATE_N(reg, 20, \
+ FN(reg, f1), v1,\
+ FN(reg, f2), v2, \
+ FN(reg, f3), v3, \
+ FN(reg, f4), v4, \
+ FN(reg, f5), v5, \
+ FN(reg, f6), v6, \
+ FN(reg, f7), v7, \
+ FN(reg, f8), v8, \
+ FN(reg, f9), v9, \
+ FN(reg, f10), v10, \
+ FN(reg, f11), v11, \
+ FN(reg, f12), v12, \
+ FN(reg, f13), v13, \
+ FN(reg, f14), v14, \
+ FN(reg, f15), v15, \
+ FN(reg, f16), v16, \
+ FN(reg, f17), v17, \
+ FN(reg, f18), v18, \
+ FN(reg, f19), v19, \
+ FN(reg, f20), v20)
+/* macro to update a register field to the specified values in the given
+ * sequence; useful when toggling a bit
+ */
+#define REG_UPDATE_SEQ(reg, field, value1, value2) \
+{ uint32_t val = REG_UPDATE(reg, field, value1); \
+ REG_SET(reg, val, field, value2); }
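+
+/* For illustration only (placeholder names): pulse a bit high then low in
+ * one read-modify-write followed by a write of the cached value:
+ *
+ *	REG_UPDATE_SEQ(SOME_CNTL, SOFT_RESET, 1, 0);
+ */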
+
+/* macros to update register fields one field at a time, in the given order */
+#define REG_UPDATE_1BY1_2(reg, f1, v1, f2, v2) \
+{ uint32_t val = REG_UPDATE(reg, f1, v1); \
+ REG_SET(reg, val, f2, v2); }
+
+#define REG_UPDATE_1BY1_3(reg, f1, v1, f2, v2, f3, v3) \
+{ uint32_t val = REG_UPDATE(reg, f1, v1); \
+ val = REG_SET(reg, val, f2, v2); \
+ REG_SET(reg, val, f3, v3); }
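+
+/* Sketch with placeholder names: FIELD_A is written to the register first,
+ * then FIELD_B, so the two field writes are ordered:
+ *
+ *	REG_UPDATE_1BY1_2(SOME_CNTL, FIELD_A, 1, FIELD_B, 0);
+ */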
+
+uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift, uint32_t mask, uint32_t *field_value);
+
+uint32_t generic_reg_get2(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2);
+
+uint32_t generic_reg_get3(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3);
+
+uint32_t generic_reg_get4(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4);
+
+uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
+ uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+ uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+ uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+ uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+ uint8_t shift5, uint32_t mask5, uint32_t *field_value5);
+
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
new file mode 100644
index 000000000000..5467332faf7b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#ifndef DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_
+#define DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_
+
+#include "core_types.h"
+#include "core_status.h"
+#include "dal_asic_id.h"
+#include "dm_pp_smu.h"
+
+/* TODO: unhardcode; 4 is for CZ (Carrizo) */
+#define MEMORY_TYPE_MULTIPLIER 4
+
+enum dce_version resource_parse_asic_id(
+ struct hw_asic_id asic_id);
+
+struct resource_caps {
+ int num_timing_generator;
+ int num_video_plane;
+ int num_audio;
+ int num_stream_encoder;
+ int num_pll;
+ int num_dwb;
+};
+
+struct resource_straps {
+ uint32_t hdmi_disable;
+ uint32_t dc_pinstraps_audio;
+ uint32_t audio_stream_number;
+};
+
+struct resource_create_funcs {
+ void (*read_dce_straps)(
+ struct dc_context *ctx, struct resource_straps *straps);
+
+ struct audio *(*create_audio)(
+ struct dc_context *ctx, unsigned int inst);
+
+ struct stream_encoder *(*create_stream_encoder)(
+ enum engine_id eng_id, struct dc_context *ctx);
+
+ struct dce_hwseq *(*create_hwseq)(
+ struct dc_context *ctx);
+};
+
+bool resource_construct(
+ unsigned int num_virtual_links,
+ struct dc *dc,
+ struct resource_pool *pool,
+ const struct resource_create_funcs *create_funcs);
+
+struct resource_pool *dc_create_resource_pool(
+ struct dc *dc,
+ int num_virtual_links,
+ enum dce_version dc_version,
+ struct hw_asic_id asic_id);
+
+void dc_destroy_resource_pool(struct dc *dc);
+
+enum dc_status resource_map_pool_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream);
+
+bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx);
+
+enum dc_status resource_build_scaling_params_for_context(
+ const struct dc *dc,
+ struct dc_state *context);
+
+void resource_build_info_frame(struct pipe_ctx *pipe_ctx);
+
+void resource_unreference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
+void resource_reference_clock_source(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
+bool resource_are_streams_timing_synchronizable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2);
+
+struct clock_source *resource_find_used_clk_src_for_sharing(
+ struct resource_context *res_ctx,
+ struct pipe_ctx *pipe_ctx);
+
+struct clock_source *dc_resource_find_first_free_pll(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
+struct pipe_ctx *resource_get_head_pipe_for_stream(
+ struct resource_context *res_ctx,
+ struct dc_stream_state *stream);
+
+bool resource_attach_surfaces_to_context(
+ struct dc_plane_state *const *plane_state,
+ int surface_count,
+ struct dc_stream_state *dc_stream,
+ struct dc_state *context,
+ const struct resource_pool *pool);
+
+struct pipe_ctx *find_idle_secondary_pipe(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool);
+
+bool resource_is_stream_unchanged(
+ struct dc_state *old_context, struct dc_stream_state *stream);
+
+bool resource_validate_attach_surfaces(
+ const struct dc_validation_set set[],
+ int set_count,
+ const struct dc_state *old_context,
+ struct dc_state *context,
+ const struct resource_pool *pool);
+
+void validate_guaranteed_copy_streams(
+ struct dc_state *context,
+ int max_streams);
+
+void resource_validate_ctx_update_pointer_after_copy(
+ const struct dc_state *src_ctx,
+ struct dc_state *dst_ctx);
+
+enum dc_status resource_map_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream);
+
+enum dc_status resource_map_phy_clock_resources(
+ const struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream);
+
+bool pipe_need_reprogram(
+ struct pipe_ctx *pipe_ctx_old,
+ struct pipe_ctx *pipe_ctx);
+
+void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
+ struct bit_depth_reduction_params *fmt_bit_depth);
+
+void update_audio_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct audio *audio,
+ bool acquired);
+#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
new file mode 100644
index 000000000000..c7e93f7223bd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -0,0 +1,48 @@
+#
+# Makefile for the 'irq' sub-component of DAL.
+# It provides IV source-id translation and register-level enable/ack
+# handling for the display interrupt sources (HPD, HPD RX, pflip,
+# vblank and vupdate) of the supported DCE/DCN families.
+
+IRQ = irq_service.o
+
+AMD_DAL_IRQ = $(addprefix $(AMDDALPATH)/dc/irq/,$(IRQ))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ)
+
+###############################################################################
+# DCE 8x
+###############################################################################
+IRQ_DCE80 = irq_service_dce80.o
+
+AMD_DAL_IRQ_DCE80 = $(addprefix $(AMDDALPATH)/dc/irq/dce80/,$(IRQ_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE80)
+
+###############################################################################
+# DCE 11x
+###############################################################################
+IRQ_DCE11 = irq_service_dce110.o
+
+AMD_DAL_IRQ_DCE11 = $(addprefix $(AMDDALPATH)/dc/irq/dce110/,$(IRQ_DCE11))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE11)
+
+###############################################################################
+# DCE 12x
+###############################################################################
+IRQ_DCE12 = irq_service_dce120.o
+
+AMD_DAL_IRQ_DCE12 = $(addprefix $(AMDDALPATH)/dc/irq/dce120/,$(IRQ_DCE12))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
+
+###############################################################################
+# DCN 1x
+###############################################################################
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
+IRQ_DCN1 = irq_service_dcn10.o
+
+AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN1)
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
new file mode 100644
index 000000000000..f7e40b292dfb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -0,0 +1,430 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce110.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include "dc.h"
+#include "core_types.h"
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ .enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\
+ ~DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK\
+ },\
+ .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
+ .ack_value = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\
+ .status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ .enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\
+ ~DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK },\
+ .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
+ .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
+ .ack_value = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\
+ .status_reg = mmHPD ## reg_num ## _DC_HPD_INT_STATUS,\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ .enable_value = {\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
+ .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .status_reg = mmDCP ## reg_num ##_GRPH_INTERRUPT_STATUS,\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ .enable_value = {\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
+ .ack_mask =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .enable_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ .enable_value = {\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .ack_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs,\
+ .src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+bool dal_irq_service_dummy_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
+{
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: called for non-implemented irq source\n",
+ __func__);
+ return false;
+}
+
+bool dal_irq_service_dummy_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: called for non-implemented irq source\n",
+ __func__);
+ return false;
+}
+
+
+bool dce110_vblank_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
+{
+ struct dc_context *dc_ctx = irq_service->ctx;
+ struct dc *core_dc = irq_service->ctx->dc;
+ enum dc_irq_source dal_irq_src = dc_interrupt_to_irq_source(
+ irq_service->ctx->dc,
+ info->src_id,
+ info->ext_id);
+ uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
+
+ struct timing_generator *tg =
+ core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+
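+	/* arm the timing generator's vertical interrupt before the IRQ
+	 * source itself is enabled
+	 */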
+ if (enable) {
+ if (!tg->funcs->arm_vert_intr(tg, 2)) {
+ DC_ERROR("Failed to get VBLANK!\n");
+ return false;
+ }
+ }
+
+ dal_irq_service_set_generic(irq_service, info, enable);
+ return true;
+
+}
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce110[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+enum dc_irq_source to_dal_irq_source_dce110(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case VISLANDS30_IV_SRCID_D2_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case VISLANDS30_IV_SRCID_D3_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case VISLANDS30_IV_SRCID_D4_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case VISLANDS30_IV_SRCID_D5_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case VISLANDS30_IV_SRCID_D1_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case VISLANDS30_IV_SRCID_D2_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case VISLANDS30_IV_SRCID_D3_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case VISLANDS30_IV_SRCID_D4_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case VISLANDS30_IV_SRCID_D5_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case VISLANDS30_IV_SRCID_D6_V_UPDATE_INT:
+ return DC_IRQ_SOURCE_VUPDATE6;
+ case VISLANDS30_IV_SRCID_D1_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case VISLANDS30_IV_SRCID_D2_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case VISLANDS30_IV_SRCID_D3_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case VISLANDS30_IV_SRCID_D4_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case VISLANDS30_IV_SRCID_D5_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case VISLANDS30_IV_SRCID_D6_GRPH_PFLIP:
+ return DC_IRQ_SOURCE_PFLIP6;
+
+ case VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_A:
+ return DC_IRQ_SOURCE_HPD1;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_B:
+ return DC_IRQ_SOURCE_HPD2;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_C:
+ return DC_IRQ_SOURCE_HPD3;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_D:
+ return DC_IRQ_SOURCE_HPD4;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_E:
+ return DC_IRQ_SOURCE_HPD5;
+ case VISLANDS30_IV_EXTID_HOTPLUG_DETECT_F:
+ return DC_IRQ_SOURCE_HPD6;
+ case VISLANDS30_IV_EXTID_HPD_RX_A:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_B:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_C:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_D:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_E:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case VISLANDS30_IV_EXTID_HPD_RX_F:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static const struct irq_service_funcs irq_service_funcs_dce110 = {
+ .to_dal_irq_source = to_dal_irq_source_dce110
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce110;
+ irq_service->funcs = &irq_service_funcs_dce110;
+}
+
+struct irq_service *dal_irq_service_dce110_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h
new file mode 100644
index 000000000000..9237646c0959
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE110_H__
+#define __DAL_IRQ_SERVICE_DCE110_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dce110_create(
+ struct irq_service_init_data *init_data);
+
+enum dc_irq_source to_dal_irq_source_dce110(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+
+bool dal_irq_service_dummy_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+
+bool dal_irq_service_dummy_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+bool dce110_vblank_set(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
new file mode 100644
index 000000000000..2ad56b1a4099
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce120.h"
+#include "../dce110/irq_service_dce110.h"
+
+#include "vega10/DC/dce_12_0_offset.h"
+#include "vega10/DC/dce_12_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
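+/* SRI() resolves a register's absolute address from the per-instance base
+ * segment index plus the per-block register offset generated in the
+ * register headers; IRQ_REG_ENTRY() then fills in the enable/ack register
+ * and mask fields of an irq_source_info entry from those names.
+ */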
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(DCP, reg_num, \
+ GRPH_INTERRUPT_CONTROL, GRPH_PFLIP_INT_MASK, \
+ GRPH_INTERRUPT_STATUS, GRPH_PFLIP_INT_CLEAR),\
+ .status_reg = SRI(GRPH_INTERRUPT_STATUS, DCP, reg_num),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(CRTC, reg_num,\
+ CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\
+ CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(CRTC, reg_num,\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL, CRTC_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL, CRTC_VERTICAL_INTERRUPT0_CLEAR),\
+ .funcs = &vblank_irq_info_funcs,\
+ .src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce120[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dce120 = {
+ .to_dal_irq_source = to_dal_irq_source_dce110
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce120;
+ irq_service->funcs = &irq_service_funcs_dce120;
+}
+
+struct irq_service *dal_irq_service_dce120_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h
new file mode 100644
index 000000000000..420c96e8fefc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE120_H__
+#define __DAL_IRQ_SERVICE_DCE120_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dce120_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
new file mode 100644
index 000000000000..8a2066c313fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "irq_service_dce80.h"
+#include "../dce110/irq_service_dce110.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include "dc_types.h"
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ DC_HPD1_INT_STATUS,
+ DC_HPD1_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ DC_HPD1_INT_CONTROL,
+ DC_HPD1_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = dce110_vblank_set,
+ .ack = NULL
+};
+
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_INVALID + reg_num] = {\
+ .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\
+ ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\
+ },\
+ .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
+ .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\
+ .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD6 + reg_num] = {\
+ .enable_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
+ .enable_value = {\
+ DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\
+ ~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\
+ .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\
+ .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
+ .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\
+ .status_reg = mmDC_HPD ## reg_num ## _INT_STATUS,\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ .enable_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ .enable_value = {\
+ GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\
+ ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\
+ .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\
+ .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\
+ .status_reg = mmDCP ## reg_num ##_GRPH_INTERRUPT_STATUS,\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_INTERRUPT_CONTROL,\
+ .enable_mask =\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ .enable_value = {\
+ CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\
+ ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\
+ .ack_mask =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ .enable_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .enable_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ .enable_value = {\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\
+ ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\
+ .ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\
+ .ack_mask =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .ack_value =\
+ CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+ .funcs = &vblank_irq_info_funcs,\
+ .src_id = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0 + reg_num\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dce80[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_int_entry(6),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ hpd_rx_int_entry(6),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ pflip_int_entry(4),
+ pflip_int_entry(5),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dce80 = {
+ .to_dal_irq_source = to_dal_irq_source_dce110
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dce80;
+ irq_service->funcs = &irq_service_funcs_dce80;
+}
+
+struct irq_service *dal_irq_service_dce80_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h
new file mode 100644
index 000000000000..3dd1013576ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCE80_H__
+#define __DAL_IRQ_SERVICE_DCE80_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dce80_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
new file mode 100644
index 000000000000..74ad24714f6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "../dce110/irq_service_dce110.h"
+
+#include "raven1/DCN/dcn_1_0_offset.h"
+#include "raven1/DCN/dcn_1_0_sh_mask.h"
+#include "vega10/soc15ip.h"
+
+#include "irq_service_dcn10.h"
+
+#include "ivsrcid/irqsrcs_dcn_1_0.h"
+
+enum dc_irq_source to_dal_irq_source_dcn10(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__DC_D3_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK3;
+ case DCN_1_0__SRCID__DC_D4_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK4;
+ case DCN_1_0__SRCID__DC_D5_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK5;
+ case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__HUBP2_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP3;
+ case DCN_1_0__SRCID__HUBP3_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP4;
+ case DCN_1_0__SRCID__HUBP4_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP5;
+ case DCN_1_0__SRCID__HUBP5_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP6;
+
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD3_INT:
+ return DC_IRQ_SOURCE_HPD3;
+ case DCN_1_0__CTXID__DC_HPD4_INT:
+ return DC_IRQ_SOURCE_HPD4;
+ case DCN_1_0__CTXID__DC_HPD5_INT:
+ return DC_IRQ_SOURCE_HPD5;
+ case DCN_1_0__CTXID__DC_HPD6_INT:
+ return DC_IRQ_SOURCE_HPD6;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ case DCN_1_0__CTXID__DC_HPD3_RX_INT:
+ return DC_IRQ_SOURCE_HPD3RX;
+ case DCN_1_0__CTXID__DC_HPD4_RX_INT:
+ return DC_IRQ_SOURCE_HPD4RX;
+ case DCN_1_0__CTXID__DC_HPD5_RX_INT:
+ return DC_IRQ_SOURCE_HPD5RX;
+ case DCN_1_0__CTXID__DC_HPD6_RX_INT:
+ return DC_IRQ_SOURCE_HPD6RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#define BASE_INNER(seg) \
+ DCE_BASE__INST0_SEG ## seg
+
+#define BASE(seg) \
+ BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ hpd_int_entry(2),
+ hpd_int_entry(3),
+ hpd_int_entry(4),
+ hpd_int_entry(5),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ hpd_rx_int_entry(2),
+ hpd_rx_int_entry(3),
+ hpd_rx_int_entry(4),
+ hpd_rx_int_entry(5),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ i2c_int_entry(3),
+ i2c_int_entry(4),
+ i2c_int_entry(5),
+ i2c_int_entry(6),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dp_sink_int_entry(3),
+ dp_sink_int_entry(4),
+ dp_sink_int_entry(5),
+ dp_sink_int_entry(6),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dc_underflow_int_entry(3),
+ dc_underflow_int_entry(4),
+ dc_underflow_int_entry(5),
+ dc_underflow_int_entry(6),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_int_entry(0),
+ vupdate_int_entry(1),
+ vupdate_int_entry(2),
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+ vblank_int_entry(3),
+ vblank_int_entry(4),
+ vblank_int_entry(5),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dcn10 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn10
+};
+
+static void construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn10;
+ irq_service->funcs = &irq_service_funcs_dcn10;
+}
+
+struct irq_service *dal_irq_service_dcn10_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ construct(irq_service, init_data);
+ return irq_service;
+}
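+
+/*
+ * Typical usage (a sketch; dc_ctx below is only a placeholder for the
+ * caller's dc_context):
+ *
+ *	struct irq_service_init_data init_data = { .ctx = dc_ctx };
+ *	struct irq_service *irqs = dal_irq_service_dcn10_create(&init_data);
+ *
+ * IH src_id/ext_id pairs are then mapped with
+ * dal_irq_service_to_irq_source() and individual sources are enabled and
+ * acknowledged through dal_irq_service_set() and dal_irq_service_ack().
+ */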
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h
new file mode 100644
index 000000000000..fd2ca4d0c316
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCN10_H__
+#define __DAL_IRQ_SERVICE_DCN10_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn10_create(
+ struct irq_service_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
new file mode 100644
index 000000000000..b106513fc2dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/irq_service_interface.h"
+#include "include/logger_interface.h"
+
+#include "dce110/irq_service_dce110.h"
+
+
+#include "dce80/irq_service_dce80.h"
+
+#include "dce120/irq_service_dce120.h"
+
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/irq_service_dcn10.h"
+#endif
+
+#include "reg_helper.h"
+#include "irq_service.h"
+
+
+
+#define CTX \
+ irq_service->ctx
+
+void dal_irq_service_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ if (!init_data || !init_data->ctx) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ irq_service->ctx = init_data->ctx;
+}
+
+void dal_irq_service_destroy(struct irq_service **irq_service)
+{
+ if (!irq_service || !*irq_service) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ kfree(*irq_service);
+
+ *irq_service = NULL;
+}
+
+const struct irq_source_info *find_irq_source_info(
+ struct irq_service *irq_service,
+ enum dc_irq_source source)
+{
+ if (source >= DAL_IRQ_SOURCES_NUMBER || source < DC_IRQ_SOURCE_INVALID)
+ return NULL;
+
+ return &irq_service->info[source];
+}
+
+void dal_irq_service_set_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable)
+{
+ uint32_t addr = info->enable_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+
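+ /* enable_value[0] is the pattern that enables the source,
+ * enable_value[1] the one that disables it; only bits covered by
+ * enable_mask are modified.
+ */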
+ value = (value & ~info->enable_mask) |
+ (info->enable_value[enable ? 0 : 1] & info->enable_mask);
+ dm_write_reg(irq_service->ctx, addr, value);
+}
+
+bool dal_irq_service_set(
+ struct irq_service *irq_service,
+ enum dc_irq_source source,
+ bool enable)
+{
+ const struct irq_source_info *info =
+ find_irq_source_info(irq_service, source);
+
+ if (!info) {
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: cannot find irq info table entry for %d\n",
+ __func__,
+ source);
+ return false;
+ }
+
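+ /* Clear anything already pending before changing the enable state so a
+ * stale event is not delivered the moment the source is enabled.
+ */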
+ dal_irq_service_ack(irq_service, source);
+
+ if (info->funcs->set)
+ return info->funcs->set(irq_service, info, enable);
+
+ dal_irq_service_set_generic(irq_service, info, enable);
+
+ return true;
+}
+
+void dal_irq_service_ack_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->ack_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+
+ value = (value & ~info->ack_mask) |
+ (info->ack_value & info->ack_mask);
+ dm_write_reg(irq_service->ctx, addr, value);
+}
+
+bool dal_irq_service_ack(
+ struct irq_service *irq_service,
+ enum dc_irq_source source)
+{
+ const struct irq_source_info *info =
+ find_irq_source_info(irq_service, source);
+
+ if (!info) {
+ dm_logger_write(
+ irq_service->ctx->logger, LOG_ERROR,
+ "%s: cannot find irq info table entry for %d\n",
+ __func__,
+ source);
+ return false;
+ }
+
+ if (info->funcs->ack)
+ return info->funcs->ack(irq_service, info);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ return true;
+}
+
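+/* Mapping an IH src_id/ext_id pair onto a dc_irq_source is delegated
+ * entirely to the per-ASIC implementation (e.g. to_dal_irq_source_dcn10).
+ */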
+enum dc_irq_source dal_irq_service_to_irq_source(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ return irq_service->funcs->to_dal_irq_source(
+ irq_service,
+ src_id,
+ ext_id);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
new file mode 100644
index 000000000000..dbfcb096eedd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_H__
+#define __DAL_IRQ_SERVICE_H__
+
+#include "include/irq_service_interface.h"
+
+#include "irq_types.h"
+
+struct irq_service;
+struct irq_source_info;
+
+struct irq_source_info_funcs {
+ bool (*set)(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+ bool (*ack)(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+};
+
+struct irq_source_info {
+ uint32_t src_id;
+ uint32_t ext_id;
+ uint32_t enable_reg;
+ uint32_t enable_mask;
+ uint32_t enable_value[2];
+ uint32_t ack_reg;
+ uint32_t ack_mask;
+ uint32_t ack_value;
+ uint32_t status_reg;
+ const struct irq_source_info_funcs *funcs;
+};
+
+struct irq_service_funcs {
+ enum dc_irq_source (*to_dal_irq_source)(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+};
+
+struct irq_service {
+ struct dc_context *ctx;
+ const struct irq_source_info *info;
+ const struct irq_service_funcs *funcs;
+};
+
+void dal_irq_service_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data);
+
+void dal_irq_service_ack_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info);
+
+void dal_irq_service_set_generic(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info,
+ bool enable);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h
new file mode 100644
index 000000000000..a506c2e939f5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq_types.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_TYPES_H__
+#define __DAL_IRQ_TYPES_H__
+
+struct dc_context;
+
+typedef void (*interrupt_handler)(void *);
+
+typedef void *irq_handler_idx;
+#define DAL_INVALID_IRQ_HANDLER_IDX NULL
+
+/* The order of the IRQ sources is important and MUST match the order
+ * used by the base driver.
+ */
+enum dc_irq_source {
+ /* Use as mask to specify invalid irq source */
+ DC_IRQ_SOURCE_INVALID = 0,
+
+ DC_IRQ_SOURCE_HPD1,
+ DC_IRQ_SOURCE_HPD2,
+ DC_IRQ_SOURCE_HPD3,
+ DC_IRQ_SOURCE_HPD4,
+ DC_IRQ_SOURCE_HPD5,
+ DC_IRQ_SOURCE_HPD6,
+
+ DC_IRQ_SOURCE_HPD1RX,
+ DC_IRQ_SOURCE_HPD2RX,
+ DC_IRQ_SOURCE_HPD3RX,
+ DC_IRQ_SOURCE_HPD4RX,
+ DC_IRQ_SOURCE_HPD5RX,
+ DC_IRQ_SOURCE_HPD6RX,
+
+ DC_IRQ_SOURCE_I2C_DDC1,
+ DC_IRQ_SOURCE_I2C_DDC2,
+ DC_IRQ_SOURCE_I2C_DDC3,
+ DC_IRQ_SOURCE_I2C_DDC4,
+ DC_IRQ_SOURCE_I2C_DDC5,
+ DC_IRQ_SOURCE_I2C_DDC6,
+
+ DC_IRQ_SOURCE_DPSINK1,
+ DC_IRQ_SOURCE_DPSINK2,
+ DC_IRQ_SOURCE_DPSINK3,
+ DC_IRQ_SOURCE_DPSINK4,
+ DC_IRQ_SOURCE_DPSINK5,
+ DC_IRQ_SOURCE_DPSINK6,
+
+ DC_IRQ_SOURCE_TIMER,
+
+ DC_IRQ_SOURCE_PFLIP_FIRST,
+ DC_IRQ_SOURCE_PFLIP1 = DC_IRQ_SOURCE_PFLIP_FIRST,
+ DC_IRQ_SOURCE_PFLIP2,
+ DC_IRQ_SOURCE_PFLIP3,
+ DC_IRQ_SOURCE_PFLIP4,
+ DC_IRQ_SOURCE_PFLIP5,
+ DC_IRQ_SOURCE_PFLIP6,
+ DC_IRQ_SOURCE_PFLIP_UNDERLAY0,
+ DC_IRQ_SOURCE_PFLIP_LAST = DC_IRQ_SOURCE_PFLIP_UNDERLAY0,
+
+ DC_IRQ_SOURCE_GPIOPAD0,
+ DC_IRQ_SOURCE_GPIOPAD1,
+ DC_IRQ_SOURCE_GPIOPAD2,
+ DC_IRQ_SOURCE_GPIOPAD3,
+ DC_IRQ_SOURCE_GPIOPAD4,
+ DC_IRQ_SOURCE_GPIOPAD5,
+ DC_IRQ_SOURCE_GPIOPAD6,
+ DC_IRQ_SOURCE_GPIOPAD7,
+ DC_IRQ_SOURCE_GPIOPAD8,
+ DC_IRQ_SOURCE_GPIOPAD9,
+ DC_IRQ_SOURCE_GPIOPAD10,
+ DC_IRQ_SOURCE_GPIOPAD11,
+ DC_IRQ_SOURCE_GPIOPAD12,
+ DC_IRQ_SOURCE_GPIOPAD13,
+ DC_IRQ_SOURCE_GPIOPAD14,
+ DC_IRQ_SOURCE_GPIOPAD15,
+ DC_IRQ_SOURCE_GPIOPAD16,
+ DC_IRQ_SOURCE_GPIOPAD17,
+ DC_IRQ_SOURCE_GPIOPAD18,
+ DC_IRQ_SOURCE_GPIOPAD19,
+ DC_IRQ_SOURCE_GPIOPAD20,
+ DC_IRQ_SOURCE_GPIOPAD21,
+ DC_IRQ_SOURCE_GPIOPAD22,
+ DC_IRQ_SOURCE_GPIOPAD23,
+ DC_IRQ_SOURCE_GPIOPAD24,
+ DC_IRQ_SOURCE_GPIOPAD25,
+ DC_IRQ_SOURCE_GPIOPAD26,
+ DC_IRQ_SOURCE_GPIOPAD27,
+ DC_IRQ_SOURCE_GPIOPAD28,
+ DC_IRQ_SOURCE_GPIOPAD29,
+ DC_IRQ_SOURCE_GPIOPAD30,
+
+ DC_IRQ_SOURCE_DC1UNDERFLOW,
+ DC_IRQ_SOURCE_DC2UNDERFLOW,
+ DC_IRQ_SOURCE_DC3UNDERFLOW,
+ DC_IRQ_SOURCE_DC4UNDERFLOW,
+ DC_IRQ_SOURCE_DC5UNDERFLOW,
+ DC_IRQ_SOURCE_DC6UNDERFLOW,
+
+ DC_IRQ_SOURCE_DMCU_SCP,
+ DC_IRQ_SOURCE_VBIOS_SW,
+
+ DC_IRQ_SOURCE_VUPDATE1,
+ DC_IRQ_SOURCE_VUPDATE2,
+ DC_IRQ_SOURCE_VUPDATE3,
+ DC_IRQ_SOURCE_VUPDATE4,
+ DC_IRQ_SOURCE_VUPDATE5,
+ DC_IRQ_SOURCE_VUPDATE6,
+
+ DC_IRQ_SOURCE_VBLANK1,
+ DC_IRQ_SOURCE_VBLANK2,
+ DC_IRQ_SOURCE_VBLANK3,
+ DC_IRQ_SOURCE_VBLANK4,
+ DC_IRQ_SOURCE_VBLANK5,
+ DC_IRQ_SOURCE_VBLANK6,
+
+ DAL_IRQ_SOURCES_NUMBER
+};
+
+enum irq_type
+{
+ IRQ_TYPE_PFLIP = DC_IRQ_SOURCE_PFLIP1,
+ IRQ_TYPE_VUPDATE = DC_IRQ_SOURCE_VUPDATE1,
+ IRQ_TYPE_VBLANK = DC_IRQ_SOURCE_VBLANK1,
+};
+
+#define DAL_VALID_IRQ_SRC_NUM(src) \
+ ((src) < DAL_IRQ_SOURCES_NUMBER && (src) > DC_IRQ_SOURCE_INVALID)
+
+/* Number of Page Flip IRQ Sources. */
+#define DAL_PFLIP_IRQ_SRC_NUM \
+ (DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1)
+
+/* the number of contexts may be expanded in the future based on needs */
+enum dc_interrupt_context {
+ INTERRUPT_LOW_IRQ_CONTEXT = 0,
+ INTERRUPT_HIGH_IRQ_CONTEXT,
+ INTERRUPT_CONTEXT_NUMBER
+};
+
+enum dc_interrupt_porlarity {
+ INTERRUPT_POLARITY_DEFAULT = 0,
+ INTERRUPT_POLARITY_LOW = INTERRUPT_POLARITY_DEFAULT,
+ INTERRUPT_POLARITY_HIGH,
+ INTERRUPT_POLARITY_BOTH
+};
+
+#define DC_DECODE_INTERRUPT_POLARITY(int_polarity) \
+ (int_polarity == INTERRUPT_POLARITY_LOW) ? "Low" : \
+ (int_polarity == INTERRUPT_POLARITY_HIGH) ? "High" : \
+ (int_polarity == INTERRUPT_POLARITY_BOTH) ? "Both" : "Invalid"
+
+struct dc_timer_interrupt_params {
+ uint32_t micro_sec_interval;
+ enum dc_interrupt_context int_context;
+};
+
+struct dc_interrupt_params {
+ /* The polarity *change* which will trigger an interrupt.
+ * If 'requested_polarity == INTERRUPT_POLARITY_BOTH', then
+ * 'current_polarity' must be initialised. */
+ enum dc_interrupt_porlarity requested_polarity;
+ /* If 'requested_polarity == INTERRUPT_POLARITY_BOTH',
+ * 'current_polarity' should contain the current state, which means
+ * the interrupt will be triggered when the state changes from the
+ * one stored in 'current_polarity'. */
+ enum dc_interrupt_porlarity current_polarity;
+ enum dc_irq_source irq_source;
+ enum dc_interrupt_context int_context;
+};
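+
+/* For example (illustrative only), a caller that wants an interrupt on
+ * every HPD1 transition could pass:
+ *
+ *	struct dc_interrupt_params params = {
+ *		.requested_polarity = INTERRUPT_POLARITY_BOTH,
+ *		.current_polarity = INTERRUPT_POLARITY_LOW,
+ *		.irq_source = DC_IRQ_SOURCE_HPD1,
+ *		.int_context = INTERRUPT_LOW_IRQ_CONTEXT,
+ *	};
+ *
+ * with current_polarity describing the line state at registration time.
+ */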
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
new file mode 100644
index 000000000000..a87c0329541f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _OS_TYPES_H_
+#define _OS_TYPES_H_
+
+#if defined __KERNEL__
+
+#include <asm/byteorder.h>
+#include <linux/types.h>
+#include <drm/drmP.h>
+
+#include <linux/kref.h>
+
+#include "cgs_linux.h"
+
+#if defined(__BIG_ENDIAN) && !defined(BIGENDIAN_CPU)
+#define BIGENDIAN_CPU
+#elif defined(__LITTLE_ENDIAN) && !defined(LITTLEENDIAN_CPU)
+#define LITTLEENDIAN_CPU
+#endif
+
+#undef READ
+#undef WRITE
+#undef FRAME_SIZE
+
+#define dm_output_to_console(fmt, ...) DRM_INFO(fmt, ##__VA_ARGS__)
+
+#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
+
+#define dm_debug(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
+
+#define dm_vlog(fmt, args) vprintk(fmt, args)
+
+#endif
+
+/*
+ *
+ * general debug capabilities
+ *
+ */
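+/*
+ * ASSERT_CRITICAL() always WARNs and, when a kernel debugger is built in
+ * (CONFIG_KGDB/CONFIG_HAVE_KGDB), traps into it as well.  ASSERT() does the
+ * same only with CONFIG_DEBUG_KERNEL_DC set and is otherwise a plain
+ * WARN_ON().  BREAK_TO_DEBUGGER() is simply ASSERT(0).
+ */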
+#if defined(CONFIG_HAVE_KGDB) || defined(CONFIG_KGDB)
+#define ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ kgdb_breakpoint(); \
+ } \
+} while (0)
+#else
+#define ASSERT_CRITICAL(expr) do { \
+ if (WARN_ON(!(expr))) { \
+ ; \
+ } \
+} while (0)
+#endif
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+#define ASSERT(expr) ASSERT_CRITICAL(expr)
+
+#else
+#define ASSERT(expr) WARN_ON(!(expr))
+#endif
+
+#define BREAK_TO_DEBUGGER() ASSERT(0)
+
+#define DC_ERR(...) do { \
+ dm_error(__VA_ARGS__); \
+ BREAK_TO_DEBUGGER(); \
+} while (0)
+
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include <asm/fpu/api.h>
+#endif
+
+#endif /* _OS_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/Makefile b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
new file mode 100644
index 000000000000..fc0b7318d9cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the virtual sub-component of DAL.
+# It provides the virtual (stub) link and stream encoders.
+
+VIRTUAL = virtual_link_encoder.o virtual_stream_encoder.o
+
+AMD_DAL_VIRTUAL = $(addprefix $(AMDDALPATH)/dc/virtual/,$(VIRTUAL))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_VIRTUAL)
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
new file mode 100644
index 000000000000..88c2bde3f039
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_services_types.h"
+
+#include "virtual_link_encoder.h"
+
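+/*
+ * Every hook below is an intentional no-op: the virtual link encoder only
+ * has to satisfy the link_encoder_funcs interface so the rest of DC can
+ * drive a display path that has no physical backend.
+ */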
+static bool virtual_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream) { return true; }
+
+static void virtual_link_encoder_hw_init(struct link_encoder *enc) {}
+
+static void virtual_link_encoder_setup(
+ struct link_encoder *enc,
+ enum signal_type signal) {}
+
+static void virtual_link_encoder_enable_tmds_output(
+ struct link_encoder *enc,
+ enum clock_source_id clock_source,
+ enum dc_color_depth color_depth,
+ bool hdmi,
+ bool dual_link,
+ uint32_t pixel_clock) {}
+
+static void virtual_link_encoder_enable_dp_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source) {}
+
+static void virtual_link_encoder_enable_dp_mst_output(
+ struct link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum clock_source_id clock_source) {}
+
+static void virtual_link_encoder_disable_output(
+ struct link_encoder *link_enc,
+ enum signal_type signal,
+ struct dc_link *link) {}
+
+static void virtual_link_encoder_dp_set_lane_settings(
+ struct link_encoder *enc,
+ const struct link_training_settings *link_settings) {}
+
+static void virtual_link_encoder_dp_set_phy_pattern(
+ struct link_encoder *enc,
+ const struct encoder_set_dp_phy_pattern_param *param) {}
+
+static void virtual_link_encoder_update_mst_stream_allocation_table(
+ struct link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table) {}
+
+static void virtual_link_encoder_connect_dig_be_to_fe(
+ struct link_encoder *enc,
+ enum engine_id engine,
+ bool connect) {}
+
+static void virtual_link_encoder_destroy(struct link_encoder **enc)
+{
+ kfree(*enc);
+ *enc = NULL;
+}
+
+
+static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
+ .validate_output_with_stream =
+ virtual_link_encoder_validate_output_with_stream,
+ .hw_init = virtual_link_encoder_hw_init,
+ .setup = virtual_link_encoder_setup,
+ .enable_tmds_output = virtual_link_encoder_enable_tmds_output,
+ .enable_dp_output = virtual_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
+ .disable_output = virtual_link_encoder_disable_output,
+ .dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ virtual_link_encoder_update_mst_stream_allocation_table,
+ .connect_dig_be_to_fe = virtual_link_encoder_connect_dig_be_to_fe,
+ .destroy = virtual_link_encoder_destroy
+};
+
+bool virtual_link_encoder_construct(
+ struct link_encoder *enc, const struct encoder_init_data *init_data)
+{
+ enc->funcs = &virtual_lnk_enc_funcs;
+ enc->ctx = init_data->ctx;
+ enc->id = init_data->encoder;
+
+ enc->hpd_source = init_data->hpd_source;
+ enc->connector = init_data->connector;
+
+ enc->transmitter = init_data->transmitter;
+
+ enc->output_signals = SIGNAL_TYPE_VIRTUAL;
+
+ enc->preferred_engine = ENGINE_ID_VIRTUAL;
+
+ return true;
+}
+
+
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h
new file mode 100644
index 000000000000..eb1a94fb8a9b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_VIRTUAL_LINK_ENCODER_H__
+#define __DC_VIRTUAL_LINK_ENCODER_H__
+
+#include "link_encoder.h"
+
+bool virtual_link_encoder_construct(
+ struct link_encoder *enc, const struct encoder_init_data *init_data);
+
+#endif /* __DC_VIRTUAL_LINK_ENCODER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
new file mode 100644
index 000000000000..3dc1733eea20
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "virtual_stream_encoder.h"
+
+static void virtual_stream_encoder_dp_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space) {}
+
+static void virtual_stream_encoder_hdmi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ int actual_pix_clk_khz,
+ bool enable_audio) {}
+
+static void virtual_stream_encoder_dvi_set_stream_attribute(
+ struct stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ bool is_dual_link) {}
+
+static void virtual_stream_encoder_set_mst_bandwidth(
+ struct stream_encoder *enc,
+ struct fixed31_32 avg_time_slots_per_mtp) {}
+
+static void virtual_stream_encoder_update_hdmi_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame) {}
+
+static void virtual_stream_encoder_stop_hdmi_info_packets(
+ struct stream_encoder *enc) {}
+
+static void virtual_stream_encoder_set_avmute(
+ struct stream_encoder *enc,
+ bool enable) {}
+
+static void virtual_stream_encoder_update_dp_info_packets(
+ struct stream_encoder *enc,
+ const struct encoder_info_frame *info_frame) {}
+
+static void virtual_stream_encoder_stop_dp_info_packets(
+ struct stream_encoder *enc) {}
+
+static void virtual_stream_encoder_dp_blank(
+ struct stream_encoder *enc) {}
+
+static void virtual_stream_encoder_dp_unblank(
+ struct stream_encoder *enc,
+ const struct encoder_unblank_param *param) {}
+
+static void virtual_audio_mute_control(
+ struct stream_encoder *enc,
+ bool mute) {}
+
+static const struct stream_encoder_funcs virtual_str_enc_funcs = {
+ .dp_set_stream_attribute =
+ virtual_stream_encoder_dp_set_stream_attribute,
+ .hdmi_set_stream_attribute =
+ virtual_stream_encoder_hdmi_set_stream_attribute,
+ .dvi_set_stream_attribute =
+ virtual_stream_encoder_dvi_set_stream_attribute,
+ .set_mst_bandwidth =
+ virtual_stream_encoder_set_mst_bandwidth,
+ .update_hdmi_info_packets =
+ virtual_stream_encoder_update_hdmi_info_packets,
+ .stop_hdmi_info_packets =
+ virtual_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets =
+ virtual_stream_encoder_update_dp_info_packets,
+ .stop_dp_info_packets =
+ virtual_stream_encoder_stop_dp_info_packets,
+ .dp_blank =
+ virtual_stream_encoder_dp_blank,
+ .dp_unblank =
+ virtual_stream_encoder_dp_unblank,
+
+ .audio_mute_control = virtual_audio_mute_control,
+ .set_avmute = virtual_stream_encoder_set_avmute,
+};
+
+bool virtual_stream_encoder_construct(
+ struct stream_encoder *enc,
+ struct dc_context *ctx,
+ struct dc_bios *bp)
+{
+ if (!enc)
+ return false;
+ if (!bp)
+ return false;
+
+ enc->funcs = &virtual_str_enc_funcs;
+ enc->ctx = ctx;
+ enc->id = ENGINE_ID_VIRTUAL;
+ enc->bp = bp;
+
+ return true;
+}
+
+struct stream_encoder *virtual_stream_encoder_create(
+ struct dc_context *ctx, struct dc_bios *bp)
+{
+ struct stream_encoder *enc = kzalloc(sizeof(*enc), GFP_KERNEL);
+
+ if (!enc)
+ return NULL;
+
+ if (virtual_stream_encoder_construct(enc, ctx, bp))
+ return enc;
+
+ BREAK_TO_DEBUGGER();
+ kfree(enc);
+ return NULL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h
new file mode 100644
index 000000000000..bf3422c66976
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_VIRTUAL_STREAM_ENCODER_H__
+#define __DC_VIRTUAL_STREAM_ENCODER_H__
+
+#include "stream_encoder.h"
+
+struct stream_encoder *virtual_stream_encoder_create(
+ struct dc_context *ctx, struct dc_bios *bp);
+
+bool virtual_stream_encoder_construct(
+ struct stream_encoder *enc,
+ struct dc_context *ctx,
+ struct dc_bios *bp);
+
+#endif /* __DC_VIRTUAL_STREAM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/audio_types.h b/drivers/gpu/drm/amd/display/include/audio_types.h
new file mode 100644
index 000000000000..6364fbc24cfe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/audio_types.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AUDIO_TYPES_H__
+#define __AUDIO_TYPES_H__
+
+#include "signal_types.h"
+
+#define AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 20
+#define MAX_HW_AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS 18
+#define MULTI_CHANNEL_SPLIT_NO_ASSO_INFO 0xFFFFFFFF
+
+
+struct audio_crtc_info {
+ uint32_t h_total;
+ uint32_t h_active;
+ uint32_t v_active;
+ uint32_t pixel_repetition;
+ uint32_t requested_pixel_clock; /* in KHz */
+ uint32_t calculated_pixel_clock; /* in KHz */
+ uint32_t refresh_rate;
+ enum dc_color_depth color_depth;
+ bool interlaced;
+};
+struct azalia_clock_info {
+ uint32_t pixel_clock_in_10khz;
+ uint32_t audio_dto_phase;
+ uint32_t audio_dto_module;
+ uint32_t audio_dto_wall_clock_ratio;
+};
+
+enum audio_dto_source {
+ DTO_SOURCE_UNKNOWN = 0,
+ DTO_SOURCE_ID0,
+ DTO_SOURCE_ID1,
+ DTO_SOURCE_ID2,
+ DTO_SOURCE_ID3,
+ DTO_SOURCE_ID4,
+ DTO_SOURCE_ID5
+};
+
+/* PLL information required for AZALIA DTO calculation */
+
+struct audio_pll_info {
+ uint32_t dp_dto_source_clock_in_khz;
+ uint32_t feed_back_divider;
+ enum audio_dto_source dto_source;
+ bool ss_enabled;
+ uint32_t ss_percentage;
+ uint32_t ss_percentage_divider;
+};
+
+struct audio_channel_associate_info {
+ union {
+ struct {
+ uint32_t ALL_CHANNEL_FL:4;
+ uint32_t ALL_CHANNEL_FR:4;
+ uint32_t ALL_CHANNEL_FC:4;
+ uint32_t ALL_CHANNEL_Sub:4;
+ uint32_t ALL_CHANNEL_SL:4;
+ uint32_t ALL_CHANNEL_SR:4;
+ uint32_t ALL_CHANNEL_BL:4;
+ uint32_t ALL_CHANNEL_BR:4;
+ } bits;
+ uint32_t u32all;
+ };
+};
+
+struct audio_output {
+ /* Front DIG id. */
+ enum engine_id engine_id;
+ /* encoder output signal */
+ enum signal_type signal;
+ /* video timing */
+ struct audio_crtc_info crtc_info;
+ /* PLL for audio */
+ struct audio_pll_info pll_info;
+};
+
+enum audio_payload {
+ CHANNEL_SPLIT_MAPPINGCHANG = 0x9,
+};
+
+#endif /* __AUDIO_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_interface.h b/drivers/gpu/drm/amd/display/include/bios_parser_interface.h
new file mode 100644
index 000000000000..d51101c5c6b0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_interface.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_INTERFACE_H__
+#define __DAL_BIOS_PARSER_INTERFACE_H__
+
+#include "dc_bios_types.h"
+
+struct bios_parser;
+
+struct bp_init_data {
+ struct dc_context *ctx;
+ uint8_t *bios;
+};
+
+struct dc_bios *dal_bios_parser_create(
+ struct bp_init_data *init,
+ enum dce_version dce_version);
+
+void dal_bios_parser_destroy(struct dc_bios **dcb);
+
+#endif /* __DAL_BIOS_PARSER_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
new file mode 100644
index 000000000000..0840f69cde99
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_BIOS_PARSER_TYPES_H__
+
+#define __DAL_BIOS_PARSER_TYPES_H__
+
+#include "dm_services.h"
+#include "include/signal_types.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/gpio_types.h"
+#include "include/link_service_types.h"
+
+/* TODO: include signal_types.h and remove this enum */
+enum as_signal_type {
+ AS_SIGNAL_TYPE_NONE = 0L, /* no signal */
+ AS_SIGNAL_TYPE_DVI,
+ AS_SIGNAL_TYPE_HDMI,
+ AS_SIGNAL_TYPE_LVDS,
+ AS_SIGNAL_TYPE_DISPLAY_PORT,
+ AS_SIGNAL_TYPE_GPU_PLL,
+ AS_SIGNAL_TYPE_UNKNOWN
+};
+
+enum bp_result {
+ BP_RESULT_OK = 0, /* There was no error */
+ BP_RESULT_BADINPUT, /* Bad input parameter */
+ BP_RESULT_BADBIOSTABLE, /* Bad BIOS table */
+ BP_RESULT_UNSUPPORTED, /* BIOS Table is not supported */
+ BP_RESULT_NORECORD, /* Record can't be found */
+ BP_RESULT_FAILURE
+};
+
+enum bp_encoder_control_action {
+ /* direct VBIOS translation! Just to simplify the translation */
+ ENCODER_CONTROL_DISABLE = 0,
+ ENCODER_CONTROL_ENABLE,
+ ENCODER_CONTROL_SETUP,
+ ENCODER_CONTROL_INIT
+};
+
+enum bp_transmitter_control_action {
+ /* direct VBIOS translation! Just to simplify the translation */
+ TRANSMITTER_CONTROL_DISABLE = 0,
+ TRANSMITTER_CONTROL_ENABLE,
+ TRANSMITTER_CONTROL_BACKLIGHT_OFF,
+ TRANSMITTER_CONTROL_BACKLIGHT_ON,
+ TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS,
+ TRANSMITTER_CONTROL_LCD_SETF_TEST_START,
+ TRANSMITTER_CONTROL_LCD_SELF_TEST_STOP,
+ TRANSMITTER_CONTROL_INIT,
+ TRANSMITTER_CONTROL_DEACTIVATE,
+ TRANSMITTER_CONTROL_ACTIAVATE,
+ TRANSMITTER_CONTROL_SETUP,
+ TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS,
+ /* ATOM_TRANSMITTER_ACTION_POWER_ON. This action is for eDP only
+ * (power up the panel)
+ */
+ TRANSMITTER_CONTROL_POWER_ON,
+ /* ATOM_TRANSMITTER_ACTION_POWER_OFF. This action is for eDP only
+ * (power down the panel)
+ */
+ TRANSMITTER_CONTROL_POWER_OFF
+};
+
+enum bp_external_encoder_control_action {
+ EXTERNAL_ENCODER_CONTROL_DISABLE = 0,
+ EXTERNAL_ENCODER_CONTROL_ENABLE = 1,
+ EXTERNAL_ENCODER_CONTROL_INIT = 0x7,
+ EXTERNAL_ENCODER_CONTROL_SETUP = 0xf,
+ EXTERNAL_ENCODER_CONTROL_UNBLANK = 0x10,
+ EXTERNAL_ENCODER_CONTROL_BLANK = 0x11,
+};
+
+enum bp_pipe_control_action {
+ ASIC_PIPE_DISABLE = 0,
+ ASIC_PIPE_ENABLE,
+ ASIC_PIPE_INIT
+};
+
+struct bp_encoder_control {
+ enum bp_encoder_control_action action;
+ enum engine_id engine_id;
+ enum transmitter transmitter;
+ enum signal_type signal;
+ enum dc_lane_count lanes_number;
+ enum dc_color_depth color_depth;
+ bool enable_dp_audio;
+ uint32_t pixel_clock; /* khz */
+};
+
+struct bp_external_encoder_control {
+ enum bp_external_encoder_control_action action;
+ enum engine_id engine_id;
+ enum dc_link_rate link_rate;
+ enum dc_lane_count lanes_number;
+ enum signal_type signal;
+ enum dc_color_depth color_depth;
+ bool coherent;
+ struct graphics_object_id encoder_id;
+ struct graphics_object_id connector_obj_id;
+ uint32_t pixel_clock; /* in KHz */
+};
+
+struct bp_crtc_source_select {
+ enum engine_id engine_id;
+ enum controller_id controller_id;
+ /* from GPU Tx aka asic_signal */
+ enum signal_type signal;
+ /* sink_signal may differ from asicSignal if Translator encoder */
+ enum signal_type sink_signal;
+ enum display_output_bit_depth display_output_bit_depth;
+ bool enable_dp_audio;
+};
+
+struct bp_transmitter_control {
+ enum bp_transmitter_control_action action;
+ enum engine_id engine_id;
+ enum transmitter transmitter; /* PhyId */
+ enum dc_lane_count lanes_number;
+ enum clock_source_id pll_id; /* needed for DCE 4.0 */
+ enum signal_type signal;
+ enum dc_color_depth color_depth; /* not used for DCE6.0 */
+ enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCe6.0 */
+ struct graphics_object_id connector_obj_id;
+ /* symClock (in 10 kHz) / pixel clock; in HDMI deep color mode it should
+ * be pixel clock * deep_color_ratio (in kHz)
+ */
+ uint32_t pixel_clock;
+ uint32_t lane_select;
+ uint32_t lane_settings;
+ bool coherent;
+ bool multi_path;
+ bool single_pll_mode;
+};
+
+struct bp_hw_crtc_timing_parameters {
+ enum controller_id controller_id;
+ /* horizontal part */
+ uint32_t h_total;
+ uint32_t h_addressable;
+ uint32_t h_overscan_left;
+ uint32_t h_overscan_right;
+ uint32_t h_sync_start;
+ uint32_t h_sync_width;
+
+ /* vertical part */
+ uint32_t v_total;
+ uint32_t v_addressable;
+ uint32_t v_overscan_top;
+ uint32_t v_overscan_bottom;
+ uint32_t v_sync_start;
+ uint32_t v_sync_width;
+
+ struct timing_flags {
+ uint32_t INTERLACE:1;
+ uint32_t PIXEL_REPETITION:4;
+ uint32_t HSYNC_POSITIVE_POLARITY:1;
+ uint32_t VSYNC_POSITIVE_POLARITY:1;
+ uint32_t HORZ_COUNT_BY_TWO:1;
+ } flags;
+};
+
+struct bp_adjust_pixel_clock_parameters {
+ /* Input: Signal Type - to be converted to Encoder mode */
+ enum signal_type signal_type;
+ /* Input: Encoder object id */
+ struct graphics_object_id encoder_object_id;
+ /* Input: Pixel Clock (requested Pixel clock based on Video timing
+ * standard used) in KHz
+ */
+ uint32_t pixel_clock;
+ /* Output: Adjusted Pixel Clock (after VBIOS exec table) in KHz */
+ uint32_t adjusted_pixel_clock;
+ /* Output: If non-zero, this refDiv value should be used to calculate
+ * other ppll params */
+ uint32_t reference_divider;
+ /* Output: If non-zero, this postDiv value should be used to calculate
+ * other ppll params */
+ uint32_t pixel_clock_post_divider;
+ /* Input: Enable spread spectrum */
+ bool ss_enable;
+};
+
+struct bp_pixel_clock_parameters {
+ enum controller_id controller_id; /* (Which CRTC uses this PLL) */
+ enum clock_source_id pll_id; /* Clock Source Id */
+ /* signal_type -> Encoder Mode - needed by VBIOS Exec table */
+ enum signal_type signal_type;
+ /* Adjusted Pixel Clock (after VBIOS exec table)
+ * that becomes Target Pixel Clock (KHz) */
+ uint32_t target_pixel_clock;
+ /* Calculated Reference divider of Display PLL */
+ uint32_t reference_divider;
+ /* Calculated Feedback divider of Display PLL */
+ uint32_t feedback_divider;
+ /* Calculated Fractional Feedback divider of Display PLL */
+ uint32_t fractional_feedback_divider;
+ /* Calculated Pixel Clock Post divider of Display PLL */
+ uint32_t pixel_clock_post_divider;
+ struct graphics_object_id encoder_object_id; /* Encoder object id */
+ /* VBIOS returns a fixed display clock when DFS-bypass feature
+ * is enabled (KHz) */
+ uint32_t dfs_bypass_display_clock;
+ /* color depth to support HDMI deep color */
+ enum transmitter_color_depth color_depth;
+
+ struct program_pixel_clock_flags {
+ uint32_t FORCE_PROGRAMMING_OF_PLL:1;
+ /* Use Engine Clock as source for Display Clock when
+ * programming PLL */
+ uint32_t USE_E_CLOCK_AS_SOURCE_FOR_D_CLOCK:1;
+ /* Use external reference clock (refDivSrc for PLL) */
+ uint32_t SET_EXTERNAL_REF_DIV_SRC:1;
+ /* Force program PHY PLL only */
+ uint32_t PROGRAM_PHY_PLL_ONLY:1;
+ /* Support for YUV420 */
+ uint32_t SUPPORT_YUV_420:1;
+ /* Use XTALIN reference clock source */
+ uint32_t SET_XTALIN_REF_SRC:1;
+ /* Use GENLK reference clock source */
+ uint32_t SET_GENLOCK_REF_DIV_SRC:1;
+ } flags;
+};
+
+enum bp_dce_clock_type {
+ DCECLOCK_TYPE_DISPLAY_CLOCK = 0,
+ DCECLOCK_TYPE_DPREFCLK = 1
+};
+
+/* DCE Clock Parameters structure for SetDceClock Exec command table */
+struct bp_set_dce_clock_parameters {
+ enum clock_source_id pll_id; /* Clock Source Id */
+ /* Display clock or DPREFCLK value */
+ uint32_t target_clock_frequency;
+ /* Clock to set: =0: DISPCLK =1: DPREFCLK =2: PIXCLK */
+ enum bp_dce_clock_type clock_type;
+
+ struct set_dce_clock_flags {
+ uint32_t USE_GENERICA_AS_SOURCE_FOR_DPREFCLK:1;
+ /* Use XTALIN reference clock source */
+ uint32_t USE_XTALIN_AS_SOURCE_FOR_DPREFCLK:1;
+ /* Use PCIE reference clock source */
+ uint32_t USE_PCIE_AS_SOURCE_FOR_DPREFCLK:1;
+ /* Use GENLK reference clock source */
+ uint32_t USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK:1;
+ } flags;
+};
+
+struct spread_spectrum_flags {
+ /* 1 = Center Spread; 0 = down spread */
+ uint32_t CENTER_SPREAD:1;
+ /* 1 = external; 0 = internal */
+ uint32_t EXTERNAL_SS:1;
+ /* 1 = delta-sigma type parameter; 0 = ver1 */
+ uint32_t DS_TYPE:1;
+};
+
+struct bp_spread_spectrum_parameters {
+ enum clock_source_id pll_id;
+ uint32_t percentage;
+ uint32_t ds_frac_amount;
+
+ union {
+ struct {
+ uint32_t step;
+ uint32_t delay;
+ uint32_t range; /* In Hz unit */
+ } ver1;
+ struct {
+ uint32_t feedback_amount;
+ uint32_t nfrac_amount;
+ uint32_t ds_frac_size;
+ } ds;
+ };
+
+ struct spread_spectrum_flags flags;
+};
+
+struct bp_encoder_cap_info {
+ uint32_t DP_HBR2_CAP:1;
+ uint32_t DP_HBR2_EN:1;
+ uint32_t DP_HBR3_EN:1;
+ uint32_t HDMI_6GB_EN:1;
+ uint32_t RESERVED:30;
+};
+
+#endif /*__DAL_BIOS_PARSER_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
new file mode 100644
index 000000000000..7abe663ecc6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_ASIC_ID_H__
+#define __DAL_ASIC_ID_H__
+
+/*
+ * ASIC internal revision ID
+ */
+
+/* DCE80 (based on ci_id.h in Perforce) */
+#define CI_BONAIRE_M_A0 0x14
+#define CI_BONAIRE_M_A1 0x15
+#define CI_HAWAII_P_A0 0x28
+
+#define CI_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_BONAIRE_M(rev) \
+ ((rev >= CI_BONAIRE_M_A0) && (rev < CI_HAWAII_P_A0))
+
+#define ASIC_REV_IS_HAWAII_P(rev) \
+ (rev >= CI_HAWAII_P_A0)
+
+/* KV1 with Spectre GFX core, 8-8-1-2 (CU-Pix-Primitive-RB) */
+#define KV_SPECTRE_A0 0x01
+
+/* KV2 with Spooky GFX core, including downgraded from Spectre core,
+ * 3-4-1-1 (CU-Pix-Primitive-RB) */
+#define KV_SPOOKY_A0 0x41
+
+/* KB with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define KB_KALINDI_A0 0x81
+
+/* KB with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define KB_KALINDI_A1 0x82
+
+/* BV with Kalindi GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define BV_KALINDI_A2 0x85
+
+/* ML with Godavari GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define ML_GODAVARI_A0 0xA1
+
+/* ML with Godavari GFX core, 2-4-1-1 (CU-Pix-Primitive-RB) */
+#define ML_GODAVARI_A1 0xA2
+
+#define KV_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_KALINDI(rev) \
+ ((rev >= KB_KALINDI_A0) && (rev < KV_UNKNOWN))
+
+#define ASIC_REV_IS_BHAVANI(rev) \
+ ((rev >= BV_KALINDI_A2) && (rev < ML_GODAVARI_A0))
+
+#define ASIC_REV_IS_GODAVARI(rev) \
+ ((rev >= ML_GODAVARI_A0) && (rev < KV_UNKNOWN))
+
+/* VI Family */
+/* DCE10 */
+#define VI_TONGA_P_A0 20
+#define VI_TONGA_P_A1 21
+#define VI_FIJI_P_A0 60
+
+/* DCE112 */
+#define VI_POLARIS10_P_A0 80
+#define VI_POLARIS11_M_A0 90
+#define VI_POLARIS12_V_A0 100
+
+#define VI_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_TONGA_P(eChipRev) ((eChipRev >= VI_TONGA_P_A0) && \
+ (eChipRev < 40))
+#define ASIC_REV_IS_FIJI_P(eChipRev) ((eChipRev >= VI_FIJI_P_A0) && \
+ (eChipRev < 80))
+
+#define ASIC_REV_IS_POLARIS10_P(eChipRev) ((eChipRev >= VI_POLARIS10_P_A0) && \
+ (eChipRev < VI_POLARIS11_M_A0))
+#define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \
+ (eChipRev < VI_POLARIS12_V_A0))
+#define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0)
+
+/* DCE11 */
+#define CZ_CARRIZO_A0 0x01
+
+#define STONEY_A0 0x61
+#define CZ_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_STONEY(rev) \
+ ((rev >= STONEY_A0) && (rev < CZ_UNKNOWN))
+
+/* DCN1_0 */
+#define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */
+#define RAVEN_A0 0x01
+#define RAVEN_B0 0x21
+#define RAVEN_UNKNOWN 0xFF
+
+#define ASIC_REV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && (eChipRev < RAVEN_UNKNOWN))
+#define RAVEN1_F0 0xF0
+#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
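+/* e.g. RAVEN_B0 (0x21) satisfies ASIC_REV_IS_RAVEN() but not
+ * ASICREV_IS_RV1_F0(), which only matches revisions from RAVEN1_F0 up to
+ * (but not including) RAVEN_UNKNOWN.
+ */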
+
+
+#define FAMILY_RV 142 /* DCN 1 */
+
+/*
+ * ASIC chip ID
+ */
+/* DCE80 */
+#define DEVICE_ID_KALINDI_9834 0x9834
+#define DEVICE_ID_TEMASH_9839 0x9839
+#define DEVICE_ID_TEMASH_983D 0x983D
+
+/* Asic Family IDs for different asic family. */
+#define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
+#define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
+#define FAMILY_VI 130 /* Volcanic Islands: Iceland (V), Tonga (M) */
+#define FAMILY_CZ 135 /* Carrizo */
+
+#define FAMILY_AI 141
+
+#define FAMILY_UNKNOWN 0xFF
+
+#endif /* __DAL_ASIC_ID_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
new file mode 100644
index 000000000000..fa543965feb5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TYPES_H__
+#define __DAL_TYPES_H__
+
+#include "signal_types.h"
+#include "dc_types.h"
+
+struct dal_logger;
+struct dc_bios;
+
+enum dce_version {
+ DCE_VERSION_UNKNOWN = (-1),
+ DCE_VERSION_8_0,
+ DCE_VERSION_8_1,
+ DCE_VERSION_8_3,
+ DCE_VERSION_10_0,
+ DCE_VERSION_11_0,
+ DCE_VERSION_11_2,
+ DCE_VERSION_12_0,
+ DCE_VERSION_MAX,
+ DCN_VERSION_1_0,
+ DCN_VERSION_MAX
+};
+
+#endif /* __DAL_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
new file mode 100644
index 000000000000..0ff2a899b8f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __DAL_DDC_SERVICE_TYPES_H__
+#define __DAL_DDC_SERVICE_TYPES_H__
+
+#define DP_BRANCH_DEVICE_ID_1 0x0010FA
+#define DP_BRANCH_DEVICE_ID_2 0x0022B9
+#define DP_SINK_DEVICE_ID_1 0x4CE000
+#define DP_BRANCH_DEVICE_ID_3 0x00001A
+#define DP_BRANCH_DEVICE_ID_4 0x0080e1
+#define DP_BRANCH_DEVICE_ID_5 0x006037
+#define DP_SINK_DEVICE_ID_2 0x001CF8
+
+
+enum ddc_result {
+ DDC_RESULT_UNKNOWN = 0,
+ DDC_RESULT_SUCESSFULL,
+ DDC_RESULT_FAILED_CHANNEL_BUSY,
+ DDC_RESULT_FAILED_TIMEOUT,
+ DDC_RESULT_FAILED_PROTOCOL_ERROR,
+ DDC_RESULT_FAILED_NACK,
+ DDC_RESULT_FAILED_INCOMPLETE,
+ DDC_RESULT_FAILED_OPERATION,
+ DDC_RESULT_FAILED_INVALID_OPERATION,
+ DDC_RESULT_FAILED_BUFFER_OVERFLOW
+};
+
+enum ddc_service_type {
+ DDC_SERVICE_TYPE_CONNECTOR,
+ DDC_SERVICE_TYPE_DISPLAY_PORT_MST,
+};
+
+/**
+ * display sink capability
+ */
+struct display_sink_capability {
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+
+ /**********************************************************
+ capabilities going INTO SINK DEVICE (stream capabilities)
+ **********************************************************/
+ /* Dongle's downstream count. */
+ uint32_t downstrm_sink_count;
+	/* Whether the dongle's downstream sink count field
+	 * (downstrm_sink_count) is valid. */
+ bool downstrm_sink_count_valid;
+
+ /* Maximum additional audio delay in microsecond (us) */
+ uint32_t additional_audio_delay;
+ /* Audio latency value in microsecond (us) */
+ uint32_t audio_latency;
+ /* Interlace video latency value in microsecond (us) */
+ uint32_t video_latency_interlace;
+ /* Progressive video latency value in microsecond (us) */
+ uint32_t video_latency_progressive;
+ /* Dongle caps: Maximum pixel clock supported over dongle for HDMI */
+ uint32_t max_hdmi_pixel_clock;
+ /* Dongle caps: Maximum deep color supported over dongle for HDMI */
+ enum dc_color_depth max_hdmi_deep_color;
+
+ /************************************************************
+ capabilities going OUT OF SOURCE DEVICE (link capabilities)
+ ************************************************************/
+ /* support for Spread Spectrum(SS) */
+ bool ss_supported;
+ /* DP link settings (laneCount, linkRate, Spread) */
+ uint32_t dp_link_lane_count;
+ uint32_t dp_link_rate;
+ uint32_t dp_link_spead;
+
+	/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, indicates
+	 * 'Frame Sequential-to-Frame Pack' conversion capability. */
+	bool is_dp_hdmi_s3d_converter;
+	/* Whether the eDP panel's sink capability has already been
+	 * queried. */
+ bool is_edp_sink_cap_valid;
+
+ enum ddc_transaction_type transaction_type;
+ enum signal_type signal;
+};
+
+struct av_sync_data {
+ uint8_t av_granularity;/* DPCD 00023h */
+ uint8_t aud_dec_lat1;/* DPCD 00024h */
+ uint8_t aud_dec_lat2;/* DPCD 00025h */
+ uint8_t aud_pp_lat1;/* DPCD 00026h */
+ uint8_t aud_pp_lat2;/* DPCD 00027h */
+ uint8_t vid_inter_lat;/* DPCD 00028h */
+ uint8_t vid_prog_lat;/* DPCD 00029h */
+ uint8_t aud_del_ins1;/* DPCD 0002Bh */
+ uint8_t aud_del_ins2;/* DPCD 0002Ch */
+ uint8_t aud_del_ins3;/* DPCD 0002Dh */
+};
+
+/*DP to VGA converter*/
+static const uint8_t DP_VGA_CONVERTER_ID_1[] = "mVGAa";
+/*DP to Dual link DVI converter*/
+static const uint8_t DP_DVI_CONVERTER_ID_1[] = "m2DVIa";
+/*Travis*/
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
+/*Nutmeg*/
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
+/*DP to VGA converter*/
+static const uint8_t DP_VGA_CONVERTER_ID_4[] = "DpVga";
+/*DP to Dual link DVI converter*/
+static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
+/*DP to Dual link DVI converter 2*/
+static const uint8_t DP_DVI_CONVERTER_ID_42[] = "v2DVIa";
+
+static const uint8_t DP_SINK_DEV_STRING_ID2_REV0[] = "\0\0\0\0\0\0";
+
+/* Identifies second generation PSR TCON from Parade: Device ID string:
+ * yy-xx-**-**-**-**
+ */
+/* xx - Hw ID high byte */
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_HIGH_BYTE =
+ 0x06;
+
+/* yy - HW ID low byte, the same silicon has several package/feature flavors */
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE1 =
+ 0x61;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE2 =
+ 0x62;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE3 =
+ 0x63;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE4 =
+ 0x72;
+static const uint32_t DP_SINK_DEV_STRING_ID2_REV1_HW_ID_LOW_BYTE5 =
+ 0x73;
+
+#endif /* __DAL_DDC_SERVICE_TYPES_H__ */
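
[Editor's note: av_sync_data mirrors a contiguous DPCD block (00023h-0002Dh, with 0002Ah unused). A minimal sketch of filling it through the generic DRM DP AUX helper rather than DC's own DDC path; the aux pointer and error handling are assumptions made for illustration:]

#include <linux/errno.h>
#include <drm/drm_dp_helper.h>
#include "ddc_service_types.h"

/* Read DPCD 0x0023..0x002D in one transaction and unpack it into
 * struct av_sync_data. Offset 0x002A is reserved and skipped.
 */
static int read_av_sync_data(struct drm_dp_aux *aux, struct av_sync_data *av)
{
	uint8_t buf[11];	/* 0x23 .. 0x2D inclusive */
	ssize_t ret;

	ret = drm_dp_dpcd_read(aux, 0x0023, buf, sizeof(buf));
	if (ret != sizeof(buf))
		return ret < 0 ? (int)ret : -EIO;

	av->av_granularity = buf[0];	/* 0x23 */
	av->aud_dec_lat1   = buf[1];	/* 0x24 */
	av->aud_dec_lat2   = buf[2];	/* 0x25 */
	av->aud_pp_lat1    = buf[3];	/* 0x26 */
	av->aud_pp_lat2    = buf[4];	/* 0x27 */
	av->vid_inter_lat  = buf[5];	/* 0x28 */
	av->vid_prog_lat   = buf[6];	/* 0x29 */
	/* buf[7] is 0x2A, not used by this struct */
	av->aud_del_ins1   = buf[8];	/* 0x2B */
	av->aud_del_ins2   = buf[9];	/* 0x2C */
	av->aud_del_ins3   = buf[10];	/* 0x2D */
	return 0;
}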
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
new file mode 100644
index 000000000000..d8e52e3b8e3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DPCD_DEFS_H__
+#define __DAL_DPCD_DEFS_H__
+
+#include <drm/drm_dp_helper.h>
+
+enum dpcd_revision {
+ DPCD_REV_10 = 0x10,
+ DPCD_REV_11 = 0x11,
+ DPCD_REV_12 = 0x12,
+ DPCD_REV_13 = 0x13,
+ DPCD_REV_14 = 0x14
+};
+
+/* these are the types stored at DOWNSTREAMPORT_PRESENT */
+enum dpcd_downstream_port_type {
+ DOWNSTREAM_DP = 0,
+ DOWNSTREAM_VGA,
+ DOWNSTREAM_DVI_HDMI,
+ DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */
+};
+
+enum dpcd_link_test_patterns {
+ LINK_TEST_PATTERN_NONE = 0,
+ LINK_TEST_PATTERN_COLOR_RAMP,
+ LINK_TEST_PATTERN_VERTICAL_BARS,
+ LINK_TEST_PATTERN_COLOR_SQUARES
+};
+
+enum dpcd_test_color_format {
+ TEST_COLOR_FORMAT_RGB = 0,
+ TEST_COLOR_FORMAT_YCBCR422,
+ TEST_COLOR_FORMAT_YCBCR444
+};
+
+enum dpcd_test_bit_depth {
+ TEST_BIT_DEPTH_6 = 0,
+ TEST_BIT_DEPTH_8,
+ TEST_BIT_DEPTH_10,
+ TEST_BIT_DEPTH_12,
+ TEST_BIT_DEPTH_16
+};
+
+/* PHY (encoder) test patterns
+ * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248).
+ */
+enum dpcd_phy_test_patterns {
+ PHY_TEST_PATTERN_NONE = 0,
+ PHY_TEST_PATTERN_D10_2,
+ PHY_TEST_PATTERN_SYMBOL_ERROR,
+ PHY_TEST_PATTERN_PRBS7,
+ PHY_TEST_PATTERN_80BIT_CUSTOM,/* For DP1.2 only */
+ PHY_TEST_PATTERN_CP2520_1,
+ PHY_TEST_PATTERN_CP2520_2,
+ PHY_TEST_PATTERN_CP2520_3, /* same as TPS4 */
+};
+
+enum dpcd_test_dyn_range {
+ TEST_DYN_RANGE_VESA = 0,
+ TEST_DYN_RANGE_CEA
+};
+
+enum dpcd_audio_test_pattern {
+ AUDIO_TEST_PATTERN_OPERATOR_DEFINED = 0,/* direct HW translation */
+ AUDIO_TEST_PATTERN_SAWTOOTH
+};
+
+enum dpcd_audio_sampling_rate {
+ AUDIO_SAMPLING_RATE_32KHZ = 0,/* direct HW translation */
+ AUDIO_SAMPLING_RATE_44_1KHZ,
+ AUDIO_SAMPLING_RATE_48KHZ,
+ AUDIO_SAMPLING_RATE_88_2KHZ,
+ AUDIO_SAMPLING_RATE_96KHZ,
+ AUDIO_SAMPLING_RATE_176_4KHZ,
+ AUDIO_SAMPLING_RATE_192KHZ
+};
+
+enum dpcd_audio_channels {
+ AUDIO_CHANNELS_1 = 0,/* direct HW translation */
+ AUDIO_CHANNELS_2,
+ AUDIO_CHANNELS_3,
+ AUDIO_CHANNELS_4,
+ AUDIO_CHANNELS_5,
+ AUDIO_CHANNELS_6,
+ AUDIO_CHANNELS_7,
+ AUDIO_CHANNELS_8,
+
+ AUDIO_CHANNELS_COUNT
+};
+
+enum dpcd_audio_test_pattern_periods {
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_NOTUSED = 0,/* direct HW translation */
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_3,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_6,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_12,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_24,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_48,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_96,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_192,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_384,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_768,
+ DPCD_AUDIO_TEST_PATTERN_PERIOD_1536
+};
+
+/* This enum is for programming DPCD TRAINING_PATTERN_SET */
+enum dpcd_training_patterns {
+ DPCD_TRAINING_PATTERN_VIDEOIDLE = 0,/* direct HW translation! */
+ DPCD_TRAINING_PATTERN_1,
+ DPCD_TRAINING_PATTERN_2,
+ DPCD_TRAINING_PATTERN_3,
+ DPCD_TRAINING_PATTERN_4 = 7
+};
+
+/* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus.
+ * It defines the possible PSR states.
+ */
+enum dpcd_psr_sink_states {
+ PSR_SINK_STATE_INACTIVE = 0,
+ PSR_SINK_STATE_ACTIVE_CAPTURE_DISPLAY_ON_SOURCE_TIMING = 1,
+ PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB = 2,
+ PSR_SINK_STATE_ACTIVE_CAPTURE_DISPLAY_ON_SINK_TIMING = 3,
+ PSR_SINK_STATE_ACTIVE_CAPTURE_TIMING_RESYNC = 4,
+ PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
+};
+
+#endif /* __DAL_DPCD_DEFS_H__ */
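
[Editor's note: dpcd_training_patterns is declared as a direct HW translation of the TRAINING_PATTERN_SET register, which is why TPS4 jumps to 7. A sketch of what programming it looks like via the stock DRM helper; DC drives this through its own link-training code, so treat this purely as an illustration of the register layout:]

#include <linux/errno.h>
#include <drm/drm_dp_helper.h>
#include "dpcd_defs.h"

/* Program DPCD TRAINING_PATTERN_SET (0x102) with one of the enum values
 * above, optionally disabling scrambling as required for TPS1-3.
 */
static int set_training_pattern(struct drm_dp_aux *aux,
				enum dpcd_training_patterns tps,
				bool scrambling_disabled)
{
	uint8_t val = (uint8_t)tps;

	if (scrambling_disabled)
		val |= DP_LINK_SCRAMBLING_DISABLE;

	return drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, val) == 1 ?
			0 : -EIO;
}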
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
new file mode 100644
index 000000000000..3248f699daf2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -0,0 +1,466 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_FIXED31_32_H__
+#define __DAL_FIXED31_32_H__
+
+#include "os_types.h"
+
+#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
+
+/*
+ * @brief
+ * Arithmetic operations on real numbers
+ * represented as fixed-point numbers.
+ * There are: 1 bit for the sign,
+ * 31 bits for the integer part,
+ * 32 bits for the fractional part.
+ *
+ * @note
+ * Currently, overflows and underflows are asserted;
+ * no special result returned.
+ */
+
+struct fixed31_32 {
+ int64_t value;
+};
+
+/*
+ * @brief
+ * Useful constants
+ */
+
+static const struct fixed31_32 dal_fixed31_32_zero = { 0 };
+static const struct fixed31_32 dal_fixed31_32_epsilon = { 1LL };
+static const struct fixed31_32 dal_fixed31_32_half = { 0x80000000LL };
+static const struct fixed31_32 dal_fixed31_32_one = { 0x100000000LL };
+
+static const struct fixed31_32 dal_fixed31_32_pi = { 13493037705LL };
+static const struct fixed31_32 dal_fixed31_32_two_pi = { 26986075409LL };
+static const struct fixed31_32 dal_fixed31_32_e = { 11674931555LL };
+static const struct fixed31_32 dal_fixed31_32_ln2 = { 2977044471LL };
+static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL };
+
+/*
+ * @brief
+ * Initialization routines
+ */
+
+/*
+ * @brief
+ * result = numerator / denominator
+ */
+struct fixed31_32 dal_fixed31_32_from_fraction(
+ int64_t numerator,
+ int64_t denominator);
+
+/*
+ * @brief
+ * result = arg
+ */
+struct fixed31_32 dal_fixed31_32_from_int_nonconst(int64_t arg);
+static inline struct fixed31_32 dal_fixed31_32_from_int(int64_t arg)
+{
+ if (__builtin_constant_p(arg)) {
+ struct fixed31_32 res;
+ BUILD_BUG_ON((LONG_MIN > arg) || (arg > LONG_MAX));
+ res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+ return res;
+ } else
+ return dal_fixed31_32_from_int_nonconst(arg);
+}
+
+/*
+ * @brief
+ * Unary operators
+ */
+
+/*
+ * @brief
+ * result = -arg
+ */
+static inline struct fixed31_32 dal_fixed31_32_neg(struct fixed31_32 arg)
+{
+ struct fixed31_32 res;
+
+ res.value = -arg.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = abs(arg) := (arg >= 0) ? arg : -arg
+ */
+static inline struct fixed31_32 dal_fixed31_32_abs(struct fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return dal_fixed31_32_neg(arg);
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary relational operators
+ */
+
+/*
+ * @brief
+ * result = arg1 < arg2
+ */
+static inline bool dal_fixed31_32_lt(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 <= arg2
+ */
+static inline bool dal_fixed31_32_le(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 == arg2
+ */
+static inline bool dal_fixed31_32_eq(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+/*
+ * @brief
+ * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_min(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg1;
+ else
+ return arg2;
+}
+
+/*
+ * @brief
+ * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
+ */
+static inline struct fixed31_32 dal_fixed31_32_max(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg2;
+ else
+ return arg1;
+}
+
+/*
+ * @brief
+ * | min_value, when arg <= min_value
+ * result = | arg, when min_value < arg < max_value
+ * | max_value, when arg >= max_value
+ */
+static inline struct fixed31_32 dal_fixed31_32_clamp(
+ struct fixed31_32 arg,
+ struct fixed31_32 min_value,
+ struct fixed31_32 max_value)
+{
+ if (dal_fixed31_32_le(arg, min_value))
+ return min_value;
+ else if (dal_fixed31_32_le(max_value, arg))
+ return max_value;
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary shift operators
+ */
+
+/*
+ * @brief
+ * result = arg << shift
+ */
+struct fixed31_32 dal_fixed31_32_shl(
+ struct fixed31_32 arg,
+ uint8_t shift);
+
+/*
+ * @brief
+ * result = arg >> shift
+ */
+static inline struct fixed31_32 dal_fixed31_32_shr(
+ struct fixed31_32 arg,
+ uint8_t shift)
+{
+ struct fixed31_32 res;
+ res.value = arg.value >> shift;
+ return res;
+}
+
+/*
+ * @brief
+ * Binary additive operators
+ */
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+struct fixed31_32 dal_fixed31_32_add(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_add(arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+struct fixed31_32 dal_fixed31_32_sub(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_sub(arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+
+/*
+ * @brief
+ * Binary multiplicative operators
+ */
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+struct fixed31_32 dal_fixed31_32_mul(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1,
+ int32_t arg2)
+{
+ return dal_fixed31_32_mul(arg1,
+ dal_fixed31_32_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = square(arg) := arg * arg
+ */
+struct fixed31_32 dal_fixed31_32_sqr(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1,
+ int64_t arg2)
+{
+ return dal_fixed31_32_from_fraction(arg1.value,
+ dal_fixed31_32_from_int(arg2).value);
+}
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct fixed31_32 dal_fixed31_32_div(struct fixed31_32 arg1,
+ struct fixed31_32 arg2)
+{
+ return dal_fixed31_32_from_fraction(arg1.value,
+ arg2.value);
+}
+
+/*
+ * @brief
+ * Reciprocal function
+ */
+
+/*
+ * @brief
+ * result = reciprocal(arg) := 1 / arg
+ *
+ * @note
+ * No special actions taken in case argument is zero.
+ */
+struct fixed31_32 dal_fixed31_32_recip(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Trigonometric functions
+ */
+
+/*
+ * @brief
+ * result = sinc(arg) := sin(arg) / arg
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct fixed31_32 dal_fixed31_32_sinc(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = sin(arg)
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct fixed31_32 dal_fixed31_32_sin(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = cos(arg)
+ *
+ * @note
+ * Argument specified in radians
+ * and should be in [-2pi...2pi] range -
+ * passing arguments outside that range
+ * will cause incorrect result!
+ */
+struct fixed31_32 dal_fixed31_32_cos(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Transcendent functions
+ */
+
+/*
+ * @brief
+ * result = exp(arg)
+ *
+ * @note
+ * Currently, function is verified for abs(arg) <= 1.
+ */
+struct fixed31_32 dal_fixed31_32_exp(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = log(arg)
+ *
+ * @note
+ * Currently, abs(arg) should be less than 1.
+ * No normalization is done.
+ * Currently, no special actions taken
+ * in case of invalid argument(s). Take care!
+ */
+struct fixed31_32 dal_fixed31_32_log(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * Power function
+ */
+
+/*
+ * @brief
+ * result = pow(arg1, arg2)
+ *
+ * @note
+ * Currently, abs(arg1) should be less than 1. Take care!
+ */
+struct fixed31_32 dal_fixed31_32_pow(
+ struct fixed31_32 arg1,
+ struct fixed31_32 arg2);
+
+/*
+ * @brief
+ * Rounding functions
+ */
+
+/*
+ * @brief
+ * result = floor(arg) := greatest integer lower than or equal to arg
+ */
+int32_t dal_fixed31_32_floor(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = round(arg) := integer nearest to arg
+ */
+int32_t dal_fixed31_32_round(
+ struct fixed31_32 arg);
+
+/*
+ * @brief
+ * result = ceil(arg) := lowest integer greater than or equal to arg
+ */
+int32_t dal_fixed31_32_ceil(
+ struct fixed31_32 arg);
+
+/* The following two functions are used in scaler HW programming to convert a
+ * fixed-point value to a format with 2 bits for the integer part and 19 bits
+ * for the fractional part. The same applies for u0d19: 0 bits for the integer
+ * part and 19 bits for the fractional part.
+ */
+
+uint32_t dal_fixed31_32_u2d19(
+ struct fixed31_32 arg);
+
+uint32_t dal_fixed31_32_u0d19(
+ struct fixed31_32 arg);
+
+#endif
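
[Editor's note: because the fractional part is exactly 32 bits, dal_fixed31_32_one is 0x100000000 and every helper above is ordinary 64-bit integer arithmetic on .value. A small usage sketch; the 16:9/1080 numbers are arbitrary:]

#include "fixed31_32.h"

/* Scale a 1080-line height by a 16:9 aspect ratio and round to an integer
 * width. 16/9 is stored as roughly 0x1C71C71C7 (~1.777...); the rounded
 * result of the multiply is exact enough for display arithmetic.
 */
static int32_t width_for_height_16_9(int32_t height)
{
	struct fixed31_32 ratio = dal_fixed31_32_from_fraction(16, 9);
	struct fixed31_32 width = dal_fixed31_32_mul_int(ratio, height);

	return dal_fixed31_32_round(width);	/* 1080 -> 1920 */
}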
diff --git a/drivers/gpu/drm/amd/display/include/fixed32_32.h b/drivers/gpu/drm/amd/display/include/fixed32_32.h
new file mode 100644
index 000000000000..9c70341fe026
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/fixed32_32.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DAL_FIXED32_32_H__
+#define __DAL_FIXED32_32_H__
+
+#include "os_types.h"
+
+struct fixed32_32 {
+ uint64_t value;
+};
+
+static const struct fixed32_32 dal_fixed32_32_zero = { 0 };
+static const struct fixed32_32 dal_fixed32_32_one = { 0x100000000LL };
+static const struct fixed32_32 dal_fixed32_32_half = { 0x80000000LL };
+
+struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d);
+static inline struct fixed32_32 dal_fixed32_32_from_int(uint32_t value)
+{
+ struct fixed32_32 fx;
+
+ fx.value = (uint64_t)value<<32;
+ return fx;
+}
+
+struct fixed32_32 dal_fixed32_32_add(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_add_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_sub(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_sub_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_mul(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_mul_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+struct fixed32_32 dal_fixed32_32_div(
+ struct fixed32_32 lhs,
+ struct fixed32_32 rhs);
+struct fixed32_32 dal_fixed32_32_div_int(
+ struct fixed32_32 lhs,
+ uint32_t rhs);
+
+static inline struct fixed32_32 dal_fixed32_32_min(struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ return (lhs.value < rhs.value) ? lhs : rhs;
+}
+
+static inline struct fixed32_32 dal_fixed32_32_max(struct fixed32_32 lhs,
+ struct fixed32_32 rhs)
+{
+ return (lhs.value > rhs.value) ? lhs : rhs;
+}
+
+static inline bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value > rhs.value;
+}
+
+static inline bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value > ((uint64_t)rhs<<32);
+}
+
+static inline bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value < rhs.value;
+}
+
+static inline bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value < ((uint64_t)rhs<<32);
+}
+
+static inline bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value <= rhs.value;
+}
+
+static inline bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs)
+{
+ return lhs.value <= ((uint64_t)rhs<<32);
+}
+
+static inline bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs)
+{
+ return lhs.value == rhs.value;
+}
+
+uint32_t dal_fixed32_32_ceil(struct fixed32_32 value);
+static inline uint32_t dal_fixed32_32_floor(struct fixed32_32 value)
+{
+ return value.value>>32;
+}
+
+uint32_t dal_fixed32_32_round(struct fixed32_32 value);
+
+#endif
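
[Editor's note: fixed32_32 is the unsigned sibling: 32 integer bits, 32 fractional bits, with floor() inlined above as a plain >> 32. A short sketch of the bandwidth-style arithmetic it is typically used for; the clock and throughput numbers are made up:]

#include "fixed32_32.h"

/* Divide a pixel clock by a pixel-per-cycle throughput and round up,
 * e.g. 594000 kHz at 1.5 pixels per cycle -> 396000 kHz.
 */
static uint32_t required_dispclk_khz(uint32_t pix_clk_khz)
{
	struct fixed32_32 pix_per_cycle = dal_fixed32_32_from_fraction(3, 2);
	struct fixed32_32 clk = dal_fixed32_32_div(
			dal_fixed32_32_from_int(pix_clk_khz), pix_per_cycle);

	return dal_fixed32_32_ceil(clk);
}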
diff --git a/drivers/gpu/drm/amd/display/include/gpio_interface.h b/drivers/gpu/drm/amd/display/include/gpio_interface.h
new file mode 100644
index 000000000000..e4fd31024b92
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_interface.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_INTERFACE_H__
+#define __DAL_GPIO_INTERFACE_H__
+
+#include "gpio_types.h"
+#include "grph_object_defs.h"
+
+struct gpio;
+
+/* Open the handle for future use */
+enum gpio_result dal_gpio_open(
+ struct gpio *gpio,
+ enum gpio_mode mode);
+
+enum gpio_result dal_gpio_open_ex(
+ struct gpio *gpio,
+ enum gpio_mode mode);
+
+/* Get high or low from the pin */
+enum gpio_result dal_gpio_get_value(
+ const struct gpio *gpio,
+ uint32_t *value);
+
+/* Set pin high or low */
+enum gpio_result dal_gpio_set_value(
+ const struct gpio *gpio,
+ uint32_t value);
+
+/* Get current mode */
+enum gpio_mode dal_gpio_get_mode(
+ const struct gpio *gpio);
+
+/* Change mode of the handle */
+enum gpio_result dal_gpio_change_mode(
+ struct gpio *gpio,
+ enum gpio_mode mode);
+
+/* Get the GPIO id */
+enum gpio_id dal_gpio_get_id(
+ const struct gpio *gpio);
+
+/* Get the GPIO enum */
+uint32_t dal_gpio_get_enum(
+ const struct gpio *gpio);
+
+/* Set the GPIO pin configuration */
+enum gpio_result dal_gpio_set_config(
+ struct gpio *gpio,
+ const struct gpio_config_data *config_data);
+
+/* Obtain GPIO pin info */
+enum gpio_result dal_gpio_get_pin_info(
+ const struct gpio *gpio,
+ struct gpio_pin_info *pin_info);
+
+/* Obtain GPIO sync source */
+enum sync_source dal_gpio_get_sync_source(
+ const struct gpio *gpio);
+
+/* Obtain GPIO pin output state (active low or active high) */
+enum gpio_pin_output_state dal_gpio_get_output_state(
+ const struct gpio *gpio);
+
+/* Close the handle */
+void dal_gpio_close(
+ struct gpio *gpio);
+
+#endif
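
[Editor's note: the open/use/close flow implied by the interface above, sketched for reading a pin level. The pin pointer is assumed to come from the GPIO service in the next header; error handling is trimmed for brevity:]

#include "gpio_interface.h"

/* Open an already-created GPIO in input mode, sample it, and close it again.
 * Returns the sampled level, or 0 if the pin could not be opened.
 */
static uint32_t sample_gpio_level(struct gpio *pin)
{
	uint32_t level = 0;

	if (dal_gpio_open(pin, GPIO_MODE_INPUT) != GPIO_RESULT_OK)
		return 0;

	dal_gpio_get_value(pin, &level);
	dal_gpio_close(pin);

	return level;
}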
diff --git a/drivers/gpu/drm/amd/display/include/gpio_service_interface.h b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
new file mode 100644
index 000000000000..f40259bade40
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_service_interface.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_SERVICE_INTERFACE_H__
+#define __DAL_GPIO_SERVICE_INTERFACE_H__
+
+#include "gpio_types.h"
+#include "gpio_interface.h"
+#include "hw/gpio.h"
+
+struct gpio_service;
+
+struct gpio *dal_gpio_create(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en,
+ enum gpio_pin_output_state output_state);
+
+void dal_gpio_destroy(
+ struct gpio **ptr);
+
+struct gpio_service *dal_gpio_service_create(
+ enum dce_version dce_version_major,
+ enum dce_version dce_version_minor,
+ struct dc_context *ctx);
+
+struct gpio *dal_gpio_service_create_irq(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask);
+
+struct ddc *dal_gpio_create_ddc(
+ struct gpio_service *service,
+ uint32_t offset,
+ uint32_t mask,
+ struct gpio_ddc_hw_info *info);
+
+
+void dal_gpio_destroy_ddc(
+ struct ddc **ddc);
+
+void dal_gpio_service_destroy(
+ struct gpio_service **ptr);
+
+enum dc_irq_source dal_irq_get_source(
+ const struct gpio *irq);
+
+enum dc_irq_source dal_irq_get_rx_source(
+ const struct gpio *irq);
+
+enum gpio_result dal_irq_setup_hpd_filter(
+ struct gpio *irq,
+ struct gpio_hpd_config *config);
+
+struct gpio *dal_gpio_create_irq(
+ struct gpio_service *service,
+ enum gpio_id id,
+ uint32_t en);
+
+void dal_gpio_destroy_irq(
+ struct gpio **ptr);
+
+
+enum gpio_result dal_ddc_open(
+ struct ddc *ddc,
+ enum gpio_mode mode,
+ enum gpio_ddc_config_type config_type);
+
+enum gpio_result dal_ddc_change_mode(
+ struct ddc *ddc,
+ enum gpio_mode mode);
+
+enum gpio_ddc_line dal_ddc_get_line(
+ const struct ddc *ddc);
+
+enum gpio_result dal_ddc_set_config(
+ struct ddc *ddc,
+ enum gpio_ddc_config_type config_type);
+
+void dal_ddc_close(
+ struct ddc *ddc);
+
+#endif
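
[Editor's note: creation goes through the service object. A hedged sketch of pairing an HPD pin with the interrupt source the irq manager should listen on; in practice the en index comes from BIOS parsing, and the NULL check on a failed create is omitted here:]

#include "gpio_service_interface.h"

/* Create the HPD pin for connector index hpd_en, resolve its irq source,
 * then release the pin again.
 */
static enum dc_irq_source hpd_irq_source_for(struct gpio_service *service,
					     uint32_t hpd_en)
{
	struct gpio *hpd = dal_gpio_create_irq(service, GPIO_ID_HPD, hpd_en);
	enum dc_irq_source src;

	src = dal_irq_get_source(hpd);
	dal_gpio_destroy_irq(&hpd);

	return src;
}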
diff --git a/drivers/gpu/drm/amd/display/include/gpio_types.h b/drivers/gpu/drm/amd/display/include/gpio_types.h
new file mode 100644
index 000000000000..8dd46ed799e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/gpio_types.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GPIO_TYPES_H__
+#define __DAL_GPIO_TYPES_H__
+
+#define BUNDLE_A_MASK 0x00FFF000L
+#define BUNDLE_B_MASK 0x00000FFFL
+
+/*
+ * gpio_result
+ *
+ * @brief
+ * The possible return codes that the GPIO object can return.
+ * These return codes can be generated
+ * directly by the GPIO object or from the GPIOPin object.
+ */
+enum gpio_result {
+ GPIO_RESULT_OK,
+ GPIO_RESULT_NULL_HANDLE,
+ GPIO_RESULT_INVALID_DATA,
+ GPIO_RESULT_DEVICE_BUSY,
+ GPIO_RESULT_OPEN_FAILED,
+ GPIO_RESULT_ALREADY_OPENED,
+ GPIO_RESULT_NON_SPECIFIC_ERROR
+};
+
+/*
+ * @brief
+ * Used to identify the specific GPIO device
+ *
+ * @notes
+ * These constants are used as indices in a vector.
+ * Thus they should start from zero and be contiguous.
+ */
+enum gpio_id {
+ GPIO_ID_UNKNOWN = (-1),
+ GPIO_ID_DDC_DATA,
+ GPIO_ID_DDC_CLOCK,
+ GPIO_ID_GENERIC,
+ GPIO_ID_HPD,
+ GPIO_ID_GPIO_PAD,
+ GPIO_ID_VIP_PAD,
+ GPIO_ID_SYNC,
+ GPIO_ID_GSL, /* global swap lock */
+ GPIO_ID_COUNT,
+ GPIO_ID_MIN = GPIO_ID_DDC_DATA,
+ GPIO_ID_MAX = GPIO_ID_GSL
+};
+
+#define GPIO_ENUM_UNKNOWN \
+ 32
+
+struct gpio_pin_info {
+ uint32_t offset;
+ uint32_t offset_y;
+ uint32_t offset_en;
+ uint32_t offset_mask;
+
+ uint32_t mask;
+ uint32_t mask_y;
+ uint32_t mask_en;
+ uint32_t mask_mask;
+};
+
+enum gpio_pin_output_state {
+ GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW,
+ GPIO_PIN_OUTPUT_STATE_ACTIVE_HIGH,
+ GPIO_PIN_OUTPUT_STATE_DEFAULT = GPIO_PIN_OUTPUT_STATE_ACTIVE_LOW
+};
+
+enum gpio_generic {
+ GPIO_GENERIC_UNKNOWN = (-1),
+ GPIO_GENERIC_A,
+ GPIO_GENERIC_B,
+ GPIO_GENERIC_C,
+ GPIO_GENERIC_D,
+ GPIO_GENERIC_E,
+ GPIO_GENERIC_F,
+ GPIO_GENERIC_G,
+ GPIO_GENERIC_COUNT,
+ GPIO_GENERIC_MIN = GPIO_GENERIC_A,
+ GPIO_GENERIC_MAX = GPIO_GENERIC_B
+};
+
+enum gpio_hpd {
+ GPIO_HPD_UNKNOWN = (-1),
+ GPIO_HPD_1,
+ GPIO_HPD_2,
+ GPIO_HPD_3,
+ GPIO_HPD_4,
+ GPIO_HPD_5,
+ GPIO_HPD_6,
+ GPIO_HPD_COUNT,
+ GPIO_HPD_MIN = GPIO_HPD_1,
+ GPIO_HPD_MAX = GPIO_HPD_6
+};
+
+enum gpio_gpio_pad {
+ GPIO_GPIO_PAD_UNKNOWN = (-1),
+ GPIO_GPIO_PAD_0,
+ GPIO_GPIO_PAD_1,
+ GPIO_GPIO_PAD_2,
+ GPIO_GPIO_PAD_3,
+ GPIO_GPIO_PAD_4,
+ GPIO_GPIO_PAD_5,
+ GPIO_GPIO_PAD_6,
+ GPIO_GPIO_PAD_7,
+ GPIO_GPIO_PAD_8,
+ GPIO_GPIO_PAD_9,
+ GPIO_GPIO_PAD_10,
+ GPIO_GPIO_PAD_11,
+ GPIO_GPIO_PAD_12,
+ GPIO_GPIO_PAD_13,
+ GPIO_GPIO_PAD_14,
+ GPIO_GPIO_PAD_15,
+ GPIO_GPIO_PAD_16,
+ GPIO_GPIO_PAD_17,
+ GPIO_GPIO_PAD_18,
+ GPIO_GPIO_PAD_19,
+ GPIO_GPIO_PAD_20,
+ GPIO_GPIO_PAD_21,
+ GPIO_GPIO_PAD_22,
+ GPIO_GPIO_PAD_23,
+ GPIO_GPIO_PAD_24,
+ GPIO_GPIO_PAD_25,
+ GPIO_GPIO_PAD_26,
+ GPIO_GPIO_PAD_27,
+ GPIO_GPIO_PAD_28,
+ GPIO_GPIO_PAD_29,
+ GPIO_GPIO_PAD_30,
+ GPIO_GPIO_PAD_COUNT,
+ GPIO_GPIO_PAD_MIN = GPIO_GPIO_PAD_0,
+ GPIO_GPIO_PAD_MAX = GPIO_GPIO_PAD_30
+};
+
+enum gpio_vip_pad {
+ GPIO_VIP_PAD_UNKNOWN = (-1),
+ /* following never used -
+ * GPIO_ID_DDC_CLOCK::GPIO_DDC_LINE_VIP_PAD defined instead */
+ GPIO_VIP_PAD_SCL,
+ /* following never used -
+ * GPIO_ID_DDC_DATA::GPIO_DDC_LINE_VIP_PAD defined instead */
+ GPIO_VIP_PAD_SDA,
+ GPIO_VIP_PAD_VHAD,
+ GPIO_VIP_PAD_VPHCTL,
+ GPIO_VIP_PAD_VIPCLK,
+ GPIO_VIP_PAD_VID,
+ GPIO_VIP_PAD_VPCLK0,
+ GPIO_VIP_PAD_DVALID,
+ GPIO_VIP_PAD_PSYNC,
+ GPIO_VIP_PAD_COUNT,
+ GPIO_VIP_PAD_MIN = GPIO_VIP_PAD_SCL,
+ GPIO_VIP_PAD_MAX = GPIO_VIP_PAD_PSYNC
+};
+
+enum gpio_sync {
+ GPIO_SYNC_UNKNOWN = (-1),
+ GPIO_SYNC_HSYNC_A,
+ GPIO_SYNC_VSYNC_A,
+ GPIO_SYNC_HSYNC_B,
+ GPIO_SYNC_VSYNC_B,
+ GPIO_SYNC_COUNT,
+ GPIO_SYNC_MIN = GPIO_SYNC_HSYNC_A,
+ GPIO_SYNC_MAX = GPIO_SYNC_VSYNC_B
+};
+
+enum gpio_gsl {
+ GPIO_GSL_UNKNOWN = (-1),
+ GPIO_GSL_GENLOCK_CLOCK,
+ GPIO_GSL_GENLOCK_VSYNC,
+ GPIO_GSL_SWAPLOCK_A,
+ GPIO_GSL_SWAPLOCK_B,
+ GPIO_GSL_COUNT,
+ GPIO_GSL_MIN = GPIO_GSL_GENLOCK_CLOCK,
+ GPIO_GSL_MAX = GPIO_GSL_SWAPLOCK_B
+};
+
+/*
+ * @brief
+ * Unique ID for a DDC handle.
+ * Values are meaningful (used as indexes into an array)
+ */
+enum gpio_ddc_line {
+ GPIO_DDC_LINE_UNKNOWN = (-1),
+ GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_DDC2,
+ GPIO_DDC_LINE_DDC3,
+ GPIO_DDC_LINE_DDC4,
+ GPIO_DDC_LINE_DDC5,
+ GPIO_DDC_LINE_DDC6,
+ GPIO_DDC_LINE_DDC_VGA,
+ GPIO_DDC_LINE_VIP_PAD,
+ GPIO_DDC_LINE_I2C_PAD = GPIO_DDC_LINE_VIP_PAD,
+ GPIO_DDC_LINE_COUNT,
+ GPIO_DDC_LINE_MIN = GPIO_DDC_LINE_DDC1,
+ GPIO_DDC_LINE_MAX = GPIO_DDC_LINE_I2C_PAD
+};
+
+/*
+ * @brief
+ * Identifies the mode of operation to open a GPIO device.
+ * A GPIO device (pin) can be programmed in only one of these modes at a time.
+ */
+enum gpio_mode {
+ GPIO_MODE_UNKNOWN = (-1),
+ GPIO_MODE_INPUT,
+ GPIO_MODE_OUTPUT,
+ GPIO_MODE_FAST_OUTPUT,
+ GPIO_MODE_HARDWARE,
+ GPIO_MODE_INTERRUPT
+};
+
+/*
+ * @brief
+ * Identifies the source of the signal when GPIO is in HW mode.
+ * get_signal_source() will return GPIO_SIGNAL_SOURCE_UNKNOWN
+ * when one of the following holds:
+ * 1. the GPIO is an input GPIO
+ * 2. the GPIO is not opened in HW mode
+ * 3. the GPIO does not have a fixed signal source
+ * (e.g. DC_GenericA has a mux instead of a fixed source)
+ */
+enum gpio_signal_source {
+ GPIO_SIGNAL_SOURCE_UNKNOWN = (-1),
+ GPIO_SIGNAL_SOURCE_DACA_STEREO_SYNC,
+ GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC,
+ GPIO_SIGNAL_SOURCE_DACB_STEREO_SYNC,
+ GPIO_SIGNAL_SOURCE_DACA_HSYNC,
+ GPIO_SIGNAL_SOURCE_DACB_HSYNC,
+ GPIO_SIGNAL_SOURCE_DACA_VSYNC,
+ GPIO_SIGNAL_SOURCE_DACB_VSYNC,
+};
+
+enum gpio_stereo_source {
+ GPIO_STEREO_SOURCE_UNKNOWN = (-1),
+ GPIO_STEREO_SOURCE_D1,
+ GPIO_STEREO_SOURCE_D2,
+ GPIO_STEREO_SOURCE_D3,
+ GPIO_STEREO_SOURCE_D4,
+ GPIO_STEREO_SOURCE_D5,
+ GPIO_STEREO_SOURCE_D6
+};
+
+/*
+ * GPIO config
+ */
+
+enum gpio_config_type {
+ GPIO_CONFIG_TYPE_NONE,
+ GPIO_CONFIG_TYPE_DDC,
+ GPIO_CONFIG_TYPE_HPD,
+ GPIO_CONFIG_TYPE_GENERIC_MUX,
+ GPIO_CONFIG_TYPE_GSL_MUX,
+ GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE
+};
+
+/* DDC configuration */
+
+enum gpio_ddc_config_type {
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C,
+ GPIO_DDC_CONFIG_TYPE_POLL_FOR_CONNECT,
+ GPIO_DDC_CONFIG_TYPE_POLL_FOR_DISCONNECT,
+ GPIO_DDC_CONFIG_TYPE_DISABLE_POLLING
+};
+
+struct gpio_ddc_config {
+ enum gpio_ddc_config_type type;
+ bool data_en_bit_present;
+ bool clock_en_bit_present;
+};
+
+/* HPD configuration */
+
+struct gpio_hpd_config {
+ uint32_t delay_on_connect; /* milliseconds */
+ uint32_t delay_on_disconnect; /* milliseconds */
+};
+
+struct gpio_generic_mux_config {
+ bool enable_output_from_mux;
+ enum gpio_signal_source mux_select;
+ enum gpio_stereo_source stereo_select;
+};
+
+enum gpio_gsl_mux_config_type {
+ GPIO_GSL_MUX_CONFIG_TYPE_DISABLE,
+ GPIO_GSL_MUX_CONFIG_TYPE_TIMING_SYNC,
+ GPIO_GSL_MUX_CONFIG_TYPE_FLIP_SYNC
+};
+
+struct gpio_gsl_mux_config {
+ enum gpio_gsl_mux_config_type type;
+ /* Actually sync_source type,
+ * however we want to avoid inter-component includes here */
+ uint32_t gsl_group;
+};
+
+struct gpio_config_data {
+ enum gpio_config_type type;
+ union {
+ struct gpio_ddc_config ddc;
+ struct gpio_hpd_config hpd;
+ struct gpio_generic_mux_config generic_mux;
+ struct gpio_gsl_mux_config gsl_mux;
+ } config;
+};
+
+#endif
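
[Editor's note: gpio_config_data is a tagged union; type selects which member of config is meaningful. A minimal sketch of configuring HPD debounce on an already-created pin; the 500/100 ms delays are illustrative values, not recommendations:]

#include "gpio_types.h"
#include "gpio_interface.h"

/* Ask the HPD pin to filter (debounce) connect/disconnect transitions. */
static enum gpio_result set_hpd_debounce(struct gpio *hpd_pin)
{
	struct gpio_config_data cfg = {
		.type = GPIO_CONFIG_TYPE_HPD,
		.config.hpd = {
			.delay_on_connect = 500,	/* ms */
			.delay_on_disconnect = 100,	/* ms */
		},
	};

	return dal_gpio_set_config(hpd_pin, &cfg);
}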
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
new file mode 100644
index 000000000000..7a9b43f84a31
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -0,0 +1,445 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GRPH_OBJECT_CTRL_DEFS_H__
+#define __DAL_GRPH_OBJECT_CTRL_DEFS_H__
+
+#include "grph_object_defs.h"
+
+/*
+ * #####################################################
+ * #####################################################
+ *
+ * These defines are shared between asic_control/bios_parser and other
+ * DAL components.
+ *
+ * #####################################################
+ * #####################################################
+ */
+
+enum display_output_bit_depth {
+ PANEL_UNDEFINE = 0,
+ PANEL_6BIT_COLOR = 1,
+ PANEL_8BIT_COLOR = 2,
+ PANEL_10BIT_COLOR = 3,
+ PANEL_12BIT_COLOR = 4,
+ PANEL_16BIT_COLOR = 5,
+};
+
+
+/* Device type as abstracted by ATOM BIOS */
+enum dal_device_type {
+ DEVICE_TYPE_UNKNOWN = 0,
+ DEVICE_TYPE_LCD,
+ DEVICE_TYPE_CRT,
+ DEVICE_TYPE_DFP,
+ DEVICE_TYPE_CV,
+ DEVICE_TYPE_TV,
+ DEVICE_TYPE_CF,
+ DEVICE_TYPE_WIRELESS
+};
+
+/* Device ID as abstracted by ATOM BIOS */
+struct device_id {
+ enum dal_device_type device_type:16;
+ uint32_t enum_id:16; /* 1 based enum */
+ uint16_t raw_device_tag;
+};
+
+struct graphics_object_i2c_info {
+ struct gpio_info {
+ uint32_t clk_mask_register_index;
+ uint32_t clk_en_register_index;
+ uint32_t clk_y_register_index;
+ uint32_t clk_a_register_index;
+ uint32_t data_mask_register_index;
+ uint32_t data_en_register_index;
+ uint32_t data_y_register_index;
+ uint32_t data_a_register_index;
+
+ uint32_t clk_mask_shift;
+ uint32_t clk_en_shift;
+ uint32_t clk_y_shift;
+ uint32_t clk_a_shift;
+ uint32_t data_mask_shift;
+ uint32_t data_en_shift;
+ uint32_t data_y_shift;
+ uint32_t data_a_shift;
+ } gpio_info;
+
+ bool i2c_hw_assist;
+ uint32_t i2c_line;
+ uint32_t i2c_engine_id;
+ uint32_t i2c_slave_address;
+};
+
+struct graphics_object_hpd_info {
+ uint8_t hpd_int_gpio_uid;
+ uint8_t hpd_active;
+};
+
+struct connector_device_tag_info {
+ uint32_t acpi_device;
+ struct device_id dev_id;
+};
+
+struct device_timing {
+ struct misc_info {
+ uint32_t HORIZONTAL_CUT_OFF:1;
+ /* 0=Active High, 1=Active Low */
+ uint32_t H_SYNC_POLARITY:1;
+ /* 0=Active High, 1=Active Low */
+ uint32_t V_SYNC_POLARITY:1;
+ uint32_t VERTICAL_CUT_OFF:1;
+ uint32_t H_REPLICATION_BY2:1;
+ uint32_t V_REPLICATION_BY2:1;
+ uint32_t COMPOSITE_SYNC:1;
+ uint32_t INTERLACE:1;
+ uint32_t DOUBLE_CLOCK:1;
+ uint32_t RGB888:1;
+ uint32_t GREY_LEVEL:2;
+ uint32_t SPATIAL:1;
+ uint32_t TEMPORAL:1;
+ uint32_t API_ENABLED:1;
+ } misc_info;
+
+ uint32_t pixel_clk; /* in KHz */
+ uint32_t horizontal_addressable;
+ uint32_t horizontal_blanking_time;
+ uint32_t vertical_addressable;
+ uint32_t vertical_blanking_time;
+ uint32_t horizontal_sync_offset;
+ uint32_t horizontal_sync_width;
+ uint32_t vertical_sync_offset;
+ uint32_t vertical_sync_width;
+ uint32_t horizontal_border;
+ uint32_t vertical_border;
+};
+
+struct supported_refresh_rate {
+ uint32_t REFRESH_RATE_30HZ:1;
+ uint32_t REFRESH_RATE_40HZ:1;
+ uint32_t REFRESH_RATE_48HZ:1;
+ uint32_t REFRESH_RATE_50HZ:1;
+ uint32_t REFRESH_RATE_60HZ:1;
+};
+
+struct embedded_panel_info {
+ struct device_timing lcd_timing;
+ uint32_t ss_id;
+ struct supported_refresh_rate supported_rr;
+ uint32_t drr_enabled;
+ uint32_t min_drr_refresh_rate;
+ bool realtek_eDPToLVDS;
+};
+
+struct dc_firmware_info {
+ struct pll_info {
+ uint32_t crystal_frequency; /* in KHz */
+ uint32_t min_input_pxl_clk_pll_frequency; /* in KHz */
+ uint32_t max_input_pxl_clk_pll_frequency; /* in KHz */
+ uint32_t min_output_pxl_clk_pll_frequency; /* in KHz */
+ uint32_t max_output_pxl_clk_pll_frequency; /* in KHz */
+ } pll_info;
+
+ struct firmware_feature {
+ uint32_t memory_clk_ss_percentage;
+ uint32_t engine_clk_ss_percentage;
+ } feature;
+
+ uint32_t default_display_engine_pll_frequency; /* in KHz */
+ uint32_t external_clock_source_frequency_for_dp; /* in KHz */
+ uint32_t smu_gpu_pll_output_freq; /* in KHz */
+ uint8_t min_allowed_bl_level;
+ uint8_t remote_display_config;
+ uint32_t default_memory_clk; /* in KHz */
+ uint32_t default_engine_clk; /* in KHz */
+ uint32_t dp_phy_ref_clk; /* in KHz - DCE12 only */
+ uint32_t i2c_engine_ref_clk; /* in KHz - DCE12 only */
+
+
+};
+
+struct step_and_delay_info {
+ uint32_t step;
+ uint32_t delay;
+ uint32_t recommended_ref_div;
+};
+
+struct spread_spectrum_info {
+ struct spread_spectrum_type {
+ bool CENTER_MODE:1;
+ bool EXTERNAL:1;
+ bool STEP_AND_DELAY_INFO:1;
+ } type;
+
+	/* In units of 0.01% (spread_percentage_divider = 100),
+	 * otherwise in units of 0.001% (spread_percentage_divider = 1000). */
+ uint32_t spread_spectrum_percentage;
+ uint32_t spread_percentage_divider; /* 100 or 1000 */
+ uint32_t spread_spectrum_range; /* modulation freq (HZ)*/
+
+ union {
+ struct step_and_delay_info step_and_delay_info;
+		/* For mem/engine/uvd, the clock-out (VCO) frequency in kHz.
+		 * For TMDS/HDMI/LVDS it is the pixel clock; for DP it is the
+		 * link clock (270000 or 162000). */
+ uint32_t target_clock_range; /* in KHz */
+ };
+
+};
+
+struct graphics_object_encoder_cap_info {
+ uint32_t dp_hbr2_cap:1;
+ uint32_t dp_hbr2_validated:1;
+ /*
+	 * TODO: add MST and HDMI 6G capable flags
+ */
+ uint32_t reserved:15;
+};
+
+struct din_connector_info {
+ uint32_t gpio_id;
+ bool gpio_tv_active_state;
+};
+
+/* Invalid channel mapping */
+enum { INVALID_DDI_CHANNEL_MAPPING = 0x0 };
+
+/**
+ * DDI PHY channel mapping reflecting XBAR setting
+ */
+union ddi_channel_mapping {
+ struct mapping {
+ uint8_t lane0:2; /* Mapping for lane 0 */
+ uint8_t lane1:2; /* Mapping for lane 1 */
+ uint8_t lane2:2; /* Mapping for lane 2 */
+ uint8_t lane3:2; /* Mapping for lane 3 */
+ } mapping;
+ uint8_t raw;
+};
+
+/**
+* Transmitter output configuration description
+*/
+struct transmitter_configuration_info {
+ /* DDI PHY ID for the transmitter */
+ enum transmitter transmitter_phy_id;
+ /* DDI PHY channel mapping reflecting crossbar setting */
+ union ddi_channel_mapping output_channel_mapping;
+};
+
+struct transmitter_configuration {
+ /* Configuration for the primary transmitter */
+ struct transmitter_configuration_info primary_transmitter_config;
+ /* Secondary transmitter configuration for Dual-link DVI */
+ struct transmitter_configuration_info secondary_transmitter_config;
+};
+
+/* These sizes should be sufficient to store info coming from the BIOS */
+#define NUMBER_OF_UCHAR_FOR_GUID 16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+#define NUMBER_OF_CSR_M3_ARB 10
+#define NUMBER_OF_DISP_CLK_VOLTAGE 4
+#define NUMBER_OF_AVAILABLE_SCLK 5
+
+struct i2c_reg_info {
+ unsigned char i2c_reg_index;
+ unsigned char i2c_reg_val;
+};
+
+struct ext_hdmi_settings {
+ unsigned char slv_addr;
+ unsigned char reg_num;
+ struct i2c_reg_info reg_settings[9];
+ unsigned char reg_num_6g;
+ struct i2c_reg_info reg_settings_6g[3];
+};
+
+
+/* V6 */
+struct integrated_info {
+ struct clock_voltage_caps {
+ /* The Voltage Index indicated by FUSE, same voltage index
+ shared with SCLK DPM fuse table */
+ uint32_t voltage_index;
+ /* Maximum clock supported with specified voltage index */
+ uint32_t max_supported_clk; /* in KHz */
+ } disp_clk_voltage[NUMBER_OF_DISP_CLK_VOLTAGE];
+
+ struct display_connection_info {
+ struct external_display_path {
+ /* A bit vector to show what devices are supported */
+ uint32_t device_tag;
+ /* 16bit device ACPI id. */
+ uint32_t device_acpi_enum;
+ /* A physical connector for displays to plug in,
+ using object connector definitions */
+ struct graphics_object_id device_connector_id;
+ /* An index into external AUX/DDC channel LUT */
+ uint8_t ext_aux_ddc_lut_index;
+ /* An index into external HPD pin LUT */
+ uint8_t ext_hpd_pin_lut_index;
+ /* external encoder object id */
+ struct graphics_object_id ext_encoder_obj_id;
+ /* XBAR mapping of the PHY channels */
+ union ddi_channel_mapping channel_mapping;
+
+ unsigned short caps;
+ } path[MAX_NUMBER_OF_EXT_DISPLAY_PATH];
+
+ uint8_t gu_id[NUMBER_OF_UCHAR_FOR_GUID];
+ uint8_t checksum;
+	} ext_disp_conn_info; /* has existed for a long time */
+
+ struct available_s_clk_list {
+ /* Maximum clock supported with specified voltage index */
+ uint32_t supported_s_clk; /* in KHz */
+ /* The Voltage Index indicated by FUSE for specified SCLK */
+ uint32_t voltage_index;
+ /* The Voltage ID indicated by FUSE for specified SCLK */
+ uint32_t voltage_id;
+ } avail_s_clk[NUMBER_OF_AVAILABLE_SCLK];
+
+ uint8_t memory_type;
+ uint8_t ma_channel_number;
+ uint32_t boot_up_engine_clock; /* in KHz */
+ uint32_t dentist_vco_freq; /* in KHz */
+ uint32_t boot_up_uma_clock; /* in KHz */
+ uint32_t boot_up_req_display_vector;
+ uint32_t other_display_misc;
+ uint32_t gpu_cap_info;
+ uint32_t sb_mmio_base_addr;
+ uint32_t system_config;
+ uint32_t cpu_cap_info;
+ uint32_t max_nb_voltage;
+ uint32_t min_nb_voltage;
+ uint32_t boot_up_nb_voltage;
+ uint32_t ext_disp_conn_info_offset;
+ uint32_t csr_m3_arb_cntl_default[NUMBER_OF_CSR_M3_ARB];
+ uint32_t csr_m3_arb_cntl_uvd[NUMBER_OF_CSR_M3_ARB];
+ uint32_t csr_m3_arb_cntl_fs3d[NUMBER_OF_CSR_M3_ARB];
+ uint32_t gmc_restore_reset_time;
+ uint32_t minimum_n_clk;
+ uint32_t idle_n_clk;
+ uint32_t ddr_dll_power_up_time;
+ uint32_t ddr_pll_power_up_time;
+ /* start for V6 */
+ uint32_t pcie_clk_ss_type;
+ uint32_t lvds_ss_percentage;
+ uint32_t lvds_sspread_rate_in_10hz;
+ uint32_t hdmi_ss_percentage;
+ uint32_t hdmi_sspread_rate_in_10hz;
+ uint32_t dvi_ss_percentage;
+ uint32_t dvi_sspread_rate_in_10_hz;
+ uint32_t sclk_dpm_boost_margin;
+ uint32_t sclk_dpm_throttle_margin;
+ uint32_t sclk_dpm_tdp_limit_pg;
+ uint32_t sclk_dpm_tdp_limit_boost;
+ uint32_t boost_engine_clock;
+ uint32_t boost_vid_2bit;
+ uint32_t enable_boost;
+ uint32_t gnb_tdp_limit;
+ /* Start from V7 */
+ uint32_t max_lvds_pclk_freq_in_single_link;
+ uint32_t lvds_misc;
+ uint32_t lvds_pwr_on_seq_dig_on_to_de_in_4ms;
+ uint32_t lvds_pwr_on_seq_de_to_vary_bl_in_4ms;
+ uint32_t lvds_pwr_off_seq_vary_bl_to_de_in4ms;
+ uint32_t lvds_pwr_off_seq_de_to_dig_on_in4ms;
+ uint32_t lvds_off_to_on_delay_in_4ms;
+ uint32_t lvds_pwr_on_seq_vary_bl_to_blon_in_4ms;
+ uint32_t lvds_pwr_off_seq_blon_to_vary_bl_in_4ms;
+ uint32_t lvds_reserved1;
+ uint32_t lvds_bit_depth_control_val;
+	/* Start from V9 */
+ unsigned char dp0_ext_hdmi_slv_addr;
+ unsigned char dp0_ext_hdmi_reg_num;
+ struct i2c_reg_info dp0_ext_hdmi_reg_settings[9];
+ unsigned char dp0_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp0_ext_hdmi_6g_reg_settings[3];
+ unsigned char dp1_ext_hdmi_slv_addr;
+ unsigned char dp1_ext_hdmi_reg_num;
+ struct i2c_reg_info dp1_ext_hdmi_reg_settings[9];
+ unsigned char dp1_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp1_ext_hdmi_6g_reg_settings[3];
+ unsigned char dp2_ext_hdmi_slv_addr;
+ unsigned char dp2_ext_hdmi_reg_num;
+ struct i2c_reg_info dp2_ext_hdmi_reg_settings[9];
+ unsigned char dp2_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp2_ext_hdmi_6g_reg_settings[3];
+ unsigned char dp3_ext_hdmi_slv_addr;
+ unsigned char dp3_ext_hdmi_reg_num;
+ struct i2c_reg_info dp3_ext_hdmi_reg_settings[9];
+ unsigned char dp3_ext_hdmi_6g_reg_num;
+ struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3];
+};
+
+/**
+* Power source ids.
+*/
+enum power_source {
+ POWER_SOURCE_AC = 0,
+ POWER_SOURCE_DC,
+ POWER_SOURCE_LIMITED_POWER,
+ POWER_SOURCE_LIMITED_POWER_2,
+ POWER_SOURCE_MAX
+};
+
+struct bios_event_info {
+ uint32_t thermal_state;
+ uint32_t backlight_level;
+ enum power_source powerSource;
+ bool has_thermal_state_changed;
+ bool has_power_source_changed;
+ bool has_forced_mode_changed;
+ bool forced_mode;
+ bool backlight_changed;
+};
+
+enum {
+ HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
+ TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
+};
+
+/*
+ * DFS-bypass flag
+ */
+/* Copy of SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS from atombios.h */
+enum {
+ DFS_BYPASS_ENABLE = 0x10
+};
+
+enum {
+ INVALID_BACKLIGHT = -1
+};
+
+struct panel_backlight_boundaries {
+ uint32_t min_signal_level;
+ uint32_t max_signal_level;
+};
+
+
+#endif
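
[Editor's note: spread_spectrum_percentage is stored in units of 1/spread_percentage_divider percent, so the divider (100 or 1000) must be consulted before comparing values from different tables. A tiny normalisation helper as a sketch; the function name is hypothetical:]

#include "grph_object_ctrl_defs.h"

/* Normalise the BIOS spread-spectrum value to parts-per-million, regardless
 * of whether the table used a divider of 100 (0.01% units) or 1000 (0.001%).
 * e.g. value 500 with divider 1000 means 0.5%, i.e. 5000 ppm.
 */
static uint32_t spread_spectrum_ppm(const struct spread_spectrum_info *info)
{
	if (!info->spread_percentage_divider)
		return 0;

	/* percent * 10000 = ppm; percent = value / divider */
	return info->spread_spectrum_percentage * 10000 /
			info->spread_percentage_divider;
}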
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
new file mode 100644
index 000000000000..2941b882b0b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GRPH_OBJECT_DEFS_H__
+#define __DAL_GRPH_OBJECT_DEFS_H__
+
+#include "grph_object_id.h"
+
+/* ********************************************************************
+ * ********************************************************************
+ *
+ * These defines are shared between all graphics objects
+ *
+ * ********************************************************************
+ * ********************************************************************
+ */
+
+/* HPD unit id - HW direct translation */
+enum hpd_source_id {
+ HPD_SOURCEID1 = 0,
+ HPD_SOURCEID2,
+ HPD_SOURCEID3,
+ HPD_SOURCEID4,
+ HPD_SOURCEID5,
+ HPD_SOURCEID6,
+
+ HPD_SOURCEID_COUNT,
+ HPD_SOURCEID_UNKNOWN
+};
+
+/* DDC unit id - HW direct translation */
+enum channel_id {
+ CHANNEL_ID_UNKNOWN = 0,
+ CHANNEL_ID_DDC1,
+ CHANNEL_ID_DDC2,
+ CHANNEL_ID_DDC3,
+ CHANNEL_ID_DDC4,
+ CHANNEL_ID_DDC5,
+ CHANNEL_ID_DDC6,
+ CHANNEL_ID_DDC_VGA,
+ CHANNEL_ID_I2C_PAD,
+ CHANNEL_ID_COUNT
+};
+
+#define DECODE_CHANNEL_ID(ch_id) \
+ (ch_id) == CHANNEL_ID_DDC1 ? "CHANNEL_ID_DDC1" : \
+ (ch_id) == CHANNEL_ID_DDC2 ? "CHANNEL_ID_DDC2" : \
+ (ch_id) == CHANNEL_ID_DDC3 ? "CHANNEL_ID_DDC3" : \
+ (ch_id) == CHANNEL_ID_DDC4 ? "CHANNEL_ID_DDC4" : \
+ (ch_id) == CHANNEL_ID_DDC5 ? "CHANNEL_ID_DDC5" : \
+ (ch_id) == CHANNEL_ID_DDC6 ? "CHANNEL_ID_DDC6" : \
+ (ch_id) == CHANNEL_ID_DDC_VGA ? "CHANNEL_ID_DDC_VGA" : \
+ (ch_id) == CHANNEL_ID_I2C_PAD ? "CHANNEL_ID_I2C_PAD" : "Invalid"
+
+enum transmitter {
+ TRANSMITTER_UNKNOWN = (-1L),
+ TRANSMITTER_UNIPHY_A,
+ TRANSMITTER_UNIPHY_B,
+ TRANSMITTER_UNIPHY_C,
+ TRANSMITTER_UNIPHY_D,
+ TRANSMITTER_UNIPHY_E,
+ TRANSMITTER_UNIPHY_F,
+ TRANSMITTER_NUTMEG_CRT,
+ TRANSMITTER_TRAVIS_CRT,
+ TRANSMITTER_TRAVIS_LCD,
+ TRANSMITTER_UNIPHY_G,
+ TRANSMITTER_COUNT
+};
+
+/* Generic source of the synchronisation input/output signal */
+/* Can be used for flow control, stereo sync, timing sync, frame sync, etc */
+enum sync_source {
+ SYNC_SOURCE_NONE = 0,
+
+ /* Source based on controllers */
+ SYNC_SOURCE_CONTROLLER0,
+ SYNC_SOURCE_CONTROLLER1,
+ SYNC_SOURCE_CONTROLLER2,
+ SYNC_SOURCE_CONTROLLER3,
+ SYNC_SOURCE_CONTROLLER4,
+ SYNC_SOURCE_CONTROLLER5,
+
+ /* Source based on GSL group */
+ SYNC_SOURCE_GSL_GROUP0,
+ SYNC_SOURCE_GSL_GROUP1,
+ SYNC_SOURCE_GSL_GROUP2,
+
+ /* Source based on GSL IOs */
+ /* These IOs normally used as GSL input/output */
+ SYNC_SOURCE_GSL_IO_FIRST,
+ SYNC_SOURCE_GSL_IO_GENLOCK_CLOCK = SYNC_SOURCE_GSL_IO_FIRST,
+ SYNC_SOURCE_GSL_IO_GENLOCK_VSYNC,
+ SYNC_SOURCE_GSL_IO_SWAPLOCK_A,
+ SYNC_SOURCE_GSL_IO_SWAPLOCK_B,
+ SYNC_SOURCE_GSL_IO_LAST = SYNC_SOURCE_GSL_IO_SWAPLOCK_B,
+
+ /* Source based on regular IOs */
+ SYNC_SOURCE_IO_FIRST,
+ SYNC_SOURCE_IO_GENERIC_A = SYNC_SOURCE_IO_FIRST,
+ SYNC_SOURCE_IO_GENERIC_B,
+ SYNC_SOURCE_IO_GENERIC_C,
+ SYNC_SOURCE_IO_GENERIC_D,
+ SYNC_SOURCE_IO_GENERIC_E,
+ SYNC_SOURCE_IO_GENERIC_F,
+ SYNC_SOURCE_IO_HPD1,
+ SYNC_SOURCE_IO_HPD2,
+ SYNC_SOURCE_IO_HSYNC_A,
+ SYNC_SOURCE_IO_VSYNC_A,
+ SYNC_SOURCE_IO_HSYNC_B,
+ SYNC_SOURCE_IO_VSYNC_B,
+ SYNC_SOURCE_IO_LAST = SYNC_SOURCE_IO_VSYNC_B,
+
+ /* Misc. flow control sources */
+ SYNC_SOURCE_DUAL_GPU_PIN
+};
+
+
+#endif
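
[Editor's note: DECODE_CHANNEL_ID is a chain of ternaries that yields a string literal, intended only for log messages. A sketch of typical use; pr_debug is used here for illustration, whereas DC routes logging through its own dal_logger:]

#include <linux/printk.h>
#include "grph_object_defs.h"

/* Log which DDC channel a connector was matched to. Any value outside the
 * known DDC lines decodes to "Invalid".
 */
static void log_ddc_channel(enum channel_id ch)
{
	pr_debug("connector uses %s\n", DECODE_CHANNEL_ID(ch));
}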
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
new file mode 100644
index 000000000000..5eb2b4dc7b9c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_GRPH_OBJECT_ID_H__
+#define __DAL_GRPH_OBJECT_ID_H__
+
+/* Types of graphics objects */
+enum object_type {
+ OBJECT_TYPE_UNKNOWN = 0,
+
+ /* Direct ATOM BIOS translation */
+ OBJECT_TYPE_GPU,
+ OBJECT_TYPE_ENCODER,
+ OBJECT_TYPE_CONNECTOR,
+ OBJECT_TYPE_ROUTER,
+ OBJECT_TYPE_GENERIC,
+
+ /* Driver specific */
+ OBJECT_TYPE_AUDIO,
+ OBJECT_TYPE_CONTROLLER,
+ OBJECT_TYPE_CLOCK_SOURCE,
+ OBJECT_TYPE_ENGINE,
+
+ OBJECT_TYPE_COUNT
+};
+
+/* Enumeration inside one type of graphics objects */
+enum object_enum_id {
+ ENUM_ID_UNKNOWN = 0,
+ ENUM_ID_1,
+ ENUM_ID_2,
+ ENUM_ID_3,
+ ENUM_ID_4,
+ ENUM_ID_5,
+ ENUM_ID_6,
+ ENUM_ID_7,
+
+ ENUM_ID_COUNT
+};
+
+/* Generic object ids */
+enum generic_id {
+ GENERIC_ID_UNKNOWN = 0,
+ GENERIC_ID_MXM_OPM,
+ GENERIC_ID_GLSYNC,
+ GENERIC_ID_STEREO,
+
+ GENERIC_ID_COUNT
+};
+
+/* Controller object ids */
+enum controller_id {
+ CONTROLLER_ID_UNDEFINED = 0,
+ CONTROLLER_ID_D0,
+ CONTROLLER_ID_D1,
+ CONTROLLER_ID_D2,
+ CONTROLLER_ID_D3,
+ CONTROLLER_ID_D4,
+ CONTROLLER_ID_D5,
+ CONTROLLER_ID_UNDERLAY0,
+ CONTROLLER_ID_MAX = CONTROLLER_ID_UNDERLAY0
+};
+
+#define IS_UNDERLAY_CONTROLLER(ctrlr_id) ((ctrlr_id) >= CONTROLLER_ID_UNDERLAY0)
+
+/*
+ * ClockSource object ids.
+ * We maintain the order matching (more or less) ATOM BIOS
+ * to improve the optimized acquire path
+ */
+enum clock_source_id {
+ CLOCK_SOURCE_ID_UNDEFINED = 0,
+ CLOCK_SOURCE_ID_PLL0,
+ CLOCK_SOURCE_ID_PLL1,
+ CLOCK_SOURCE_ID_PLL2,
+ CLOCK_SOURCE_ID_EXTERNAL, /* ID (Phy) ref. clk. for DP */
+ CLOCK_SOURCE_ID_DCPLL,
+ CLOCK_SOURCE_ID_DFS, /* DENTIST */
+ CLOCK_SOURCE_ID_VCE, /* VCE does not need a real PLL */
+ /* Used to distinguish between programming pixel clock and ID (Phy) clock */
+ CLOCK_SOURCE_ID_DP_DTO,
+
+ CLOCK_SOURCE_COMBO_PHY_PLL0, /*combo PHY PLL defines (DC 11.2 and up)*/
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ CLOCK_SOURCE_COMBO_PHY_PLL5,
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0
+};
+
+/* Encoder object ids */
+enum encoder_id {
+ ENCODER_ID_UNKNOWN = 0,
+
+ /* Radeon Class Display Hardware */
+ ENCODER_ID_INTERNAL_LVDS,
+ ENCODER_ID_INTERNAL_TMDS1,
+ ENCODER_ID_INTERNAL_TMDS2,
+ ENCODER_ID_INTERNAL_DAC1,
+ ENCODER_ID_INTERNAL_DAC2, /* TV/CV DAC */
+
+ /* External Third Party Encoders */
+ ENCODER_ID_INTERNAL_LVTM1, /* not used for Radeon */
+ ENCODER_ID_INTERNAL_HDMI,
+
+ /* Kaleidoscope (KLDSCP) Class Display Hardware */
+ ENCODER_ID_INTERNAL_KLDSCP_TMDS1,
+ ENCODER_ID_INTERNAL_KLDSCP_DAC1,
+ ENCODER_ID_INTERNAL_KLDSCP_DAC2, /* Shared with CV/TV and CRT */
+ /* External TMDS (dual link) */
+ ENCODER_ID_EXTERNAL_MVPU_FPGA, /* MVPU FPGA chip */
+ ENCODER_ID_INTERNAL_DDI,
+ ENCODER_ID_INTERNAL_UNIPHY,
+ ENCODER_ID_INTERNAL_KLDSCP_LVTMA,
+ ENCODER_ID_INTERNAL_UNIPHY1,
+ ENCODER_ID_INTERNAL_UNIPHY2,
+ ENCODER_ID_EXTERNAL_NUTMEG,
+ ENCODER_ID_EXTERNAL_TRAVIS,
+
+ ENCODER_ID_INTERNAL_WIRELESS, /* Internal wireless display encoder */
+ ENCODER_ID_INTERNAL_UNIPHY3,
+ ENCODER_ID_INTERNAL_VIRTUAL,
+};
+
+/* Connector object ids */
+enum connector_id {
+ CONNECTOR_ID_UNKNOWN = 0,
+ CONNECTOR_ID_SINGLE_LINK_DVII = 1,
+ CONNECTOR_ID_DUAL_LINK_DVII = 2,
+ CONNECTOR_ID_SINGLE_LINK_DVID = 3,
+ CONNECTOR_ID_DUAL_LINK_DVID = 4,
+ CONNECTOR_ID_VGA = 5,
+ CONNECTOR_ID_HDMI_TYPE_A = 12,
+ CONNECTOR_ID_LVDS = 14,
+ CONNECTOR_ID_PCIE = 16,
+ CONNECTOR_ID_HARDCODE_DVI = 18,
+ CONNECTOR_ID_DISPLAY_PORT = 19,
+ CONNECTOR_ID_EDP = 20,
+ CONNECTOR_ID_MXM = 21,
+ CONNECTOR_ID_WIRELESS = 22,
+ CONNECTOR_ID_MIRACAST = 23,
+
+ CONNECTOR_ID_VIRTUAL = 100
+};
+
+/* Audio object ids */
+enum audio_id {
+ AUDIO_ID_UNKNOWN = 0,
+ AUDIO_ID_INTERNAL_AZALIA
+};
+
+/* Engine object ids */
+enum engine_id {
+ ENGINE_ID_DIGA,
+ ENGINE_ID_DIGB,
+ ENGINE_ID_DIGC,
+ ENGINE_ID_DIGD,
+ ENGINE_ID_DIGE,
+ ENGINE_ID_DIGF,
+ ENGINE_ID_DIGG,
+ ENGINE_ID_DACA,
+ ENGINE_ID_DACB,
+ ENGINE_ID_VCE, /* wireless display pseudo-encoder */
+ ENGINE_ID_VIRTUAL,
+
+ ENGINE_ID_COUNT,
+ ENGINE_ID_UNKNOWN = (-1L)
+};
+
+enum transmitter_color_depth {
+ TRANSMITTER_COLOR_DEPTH_24 = 0, /* 8 bits */
+ TRANSMITTER_COLOR_DEPTH_30, /* 10 bits */
+ TRANSMITTER_COLOR_DEPTH_36, /* 12 bits */
+ TRANSMITTER_COLOR_DEPTH_48 /* 16 bits */
+};
+
+/*
+ *****************************************************************************
+ * graphics_object_id struct
+ *
+ * graphics_object_id is a very simple struct wrapping a 32-bit Graphics
+ * Object identification
+ *
+ * This struct should stay very simple:
+ *  No dependencies at all (no includes)
+ *  No debug messages or asserts
+ *  No #ifndef or other preprocessor directives
+ *  No growth in size (no additional data members)
+ *****************************************************************************
+ */
+
+struct graphics_object_id {
+ uint32_t id:8;
+ uint32_t enum_id:4;
+ uint32_t type:4;
+ uint32_t reserved:16; /* for padding. total size should be u32 */
+};
+
+/* some simple functions for convenient graphics_object_id handling */
+
+static inline struct graphics_object_id dal_graphics_object_id_init(
+ uint32_t id,
+ enum object_enum_id enum_id,
+ enum object_type type)
+{
+ struct graphics_object_id result = {
+ id, enum_id, type, 0
+ };
+
+ return result;
+}
+
+bool dal_graphics_object_id_is_equal(
+ struct graphics_object_id id1,
+ struct graphics_object_id id2);
+
+/* Based on internal data members memory layout */
+static inline uint32_t dal_graphics_object_id_to_uint(
+ struct graphics_object_id id)
+{
+ return id.id + (id.enum_id << 0x8) + (id.type << 0xc);
+}
+
+static inline enum controller_id dal_graphics_object_id_get_controller_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_CONTROLLER)
+ return id.id;
+ return CONTROLLER_ID_UNDEFINED;
+}
+
+static inline enum clock_source_id dal_graphics_object_id_get_clock_source_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_CLOCK_SOURCE)
+ return id.id;
+ return CLOCK_SOURCE_ID_UNDEFINED;
+}
+
+static inline enum encoder_id dal_graphics_object_id_get_encoder_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_ENCODER)
+ return id.id;
+ return ENCODER_ID_UNKNOWN;
+}
+
+static inline enum connector_id dal_graphics_object_id_get_connector_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_CONNECTOR)
+ return id.id;
+ return CONNECTOR_ID_UNKNOWN;
+}
+
+static inline enum audio_id dal_graphics_object_id_get_audio_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_AUDIO)
+ return id.id;
+ return AUDIO_ID_UNKNOWN;
+}
+
+static inline enum engine_id dal_graphics_object_id_get_engine_id(
+ struct graphics_object_id id)
+{
+ if (id.type == OBJECT_TYPE_ENGINE)
+ return id.id;
+ return ENGINE_ID_UNKNOWN;
+}
+#endif
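
A minimal sketch of how the helpers above fit together (illustrative only, not part of the patch; the particular encoder values are just an example):

static void graphics_object_id_example(void)
{
	/* Wrap "internal UNIPHY encoder, instance 1" into a packed object id. */
	struct graphics_object_id id = dal_graphics_object_id_init(
		ENCODER_ID_INTERNAL_UNIPHY, ENUM_ID_1, OBJECT_TYPE_ENCODER);

	/* Packed layout matches the bitfields: id | enum_id << 8 | type << 12. */
	uint32_t packed = dal_graphics_object_id_to_uint(id);

	/* Typed accessors return the *_UNKNOWN/UNDEFINED value on a type mismatch. */
	enum encoder_id enc = dal_graphics_object_id_get_encoder_id(id);
	enum connector_id con = dal_graphics_object_id_get_connector_id(id);

	(void)packed;
	(void)enc;
	(void)con;	/* con == CONNECTOR_ID_UNKNOWN here, since the type is ENCODER */
}
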
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
new file mode 100644
index 000000000000..13a3c82d118f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_I2CAUX_INTERFACE_H__
+#define __DAL_I2CAUX_INTERFACE_H__
+
+#include "dc_types.h"
+#include "gpio_service_interface.h"
+
+
+#define DEFAULT_AUX_MAX_DATA_SIZE 16
+#define AUX_MAX_DEFER_WRITE_RETRY 20
+
+struct aux_payload {
+ /* set the following flag to read/write I2C data,
+ * clear it to read/write DPCD data */
+ bool i2c_over_aux;
+ /* set the following flag to write data,
+ * clear it to read data */
+ bool write;
+ uint32_t address;
+ uint8_t length;
+ uint8_t *data;
+};
+
+struct aux_command {
+ struct aux_payload *payloads;
+ uint8_t number_of_payloads;
+
+ /* expressed in milliseconds
+ * zero means "use default value" */
+ uint32_t defer_delay;
+
+ /* zero means "use default value" */
+ uint32_t max_defer_write_retry;
+
+ enum i2c_mot_mode mot;
+};
+
+union aux_config {
+ struct {
+ uint32_t ALLOW_AUX_WHEN_HPD_LOW:1;
+ } bits;
+ uint32_t raw;
+};
+
+struct i2caux;
+
+struct i2caux *dal_i2caux_create(
+ struct dc_context *ctx);
+
+bool dal_i2caux_submit_i2c_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct i2c_command *cmd);
+
+bool dal_i2caux_submit_aux_command(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ struct aux_command *cmd);
+
+void dal_i2caux_configure_aux(
+ struct i2caux *i2caux,
+ struct ddc *ddc,
+ union aux_config cfg);
+
+void dal_i2caux_destroy(
+ struct i2caux **ptr);
+
+#endif
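
A minimal sketch of a native-AUX DPCD read built on this interface (illustrative only, not part of the patch; the DPCD address and the zero-initialized mot field are assumptions, and the i2caux/ddc objects are expected to come from the calling context):

static bool read_dpcd_rev(struct i2caux *i2caux, struct ddc *ddc, uint8_t *rev)
{
	/* Native AUX (not I2C-over-AUX) read of one byte at DPCD address 0x000. */
	struct aux_payload payload = {
		.i2c_over_aux = false,
		.write = false,
		.address = 0x000,
		.length = 1,
		.data = rev,
	};
	struct aux_command cmd = {
		.payloads = &payload,
		.number_of_payloads = 1,
		.defer_delay = 0,		/* 0 = use default */
		.max_defer_write_retry = 0,	/* 0 = use default */
		/* .mot left zero-initialized; the proper i2c_mot_mode value
		 * comes from dc_types.h */
	};

	return dal_i2caux_submit_aux_command(i2caux, ddc, &cmd);
}
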
diff --git a/drivers/gpu/drm/amd/display/include/irq_service_interface.h b/drivers/gpu/drm/amd/display/include/irq_service_interface.h
new file mode 100644
index 000000000000..d6ebed524daf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/irq_service_interface.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_INTERFACE_H__
+#define __DAL_IRQ_SERVICE_INTERFACE_H__
+
+struct irq_service_init_data {
+ struct dc_context *ctx;
+};
+
+struct irq_service;
+
+void dal_irq_service_destroy(struct irq_service **irq_service);
+
+bool dal_irq_service_set(
+ struct irq_service *irq_service,
+ enum dc_irq_source source,
+ bool enable);
+
+bool dal_irq_service_ack(
+ struct irq_service *irq_service,
+ enum dc_irq_source source);
+
+enum dc_irq_source dal_irq_service_to_irq_source(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id);
+
+#endif
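
A minimal sketch of the intended call pattern (illustrative only, not part of the patch; the dc_irq_source value is passed in because its enumerators live in irq_types.h, which is not shown here):

static void enable_and_ack_irq(struct irq_service *irq_service,
		enum dc_irq_source src)
{
	/* Unmask the source, then acknowledge a pending occurrence of it. */
	if (dal_irq_service_set(irq_service, src, true))
		dal_irq_service_ack(irq_service, src);
}
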
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
new file mode 100644
index 000000000000..adea1a59f620
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LINK_SERVICE_TYPES_H__
+#define __DAL_LINK_SERVICE_TYPES_H__
+
+#include "grph_object_id.h"
+#include "dal_types.h"
+#include "irq_types.h"
+
+/*struct mst_mgr_callback_object;*/
+struct ddc;
+struct irq_manager;
+
+enum {
+ MAX_CONTROLLER_NUM = 6
+};
+
+enum dp_power_state {
+ DP_POWER_STATE_D0 = 1,
+ DP_POWER_STATE_D3
+};
+
+enum edp_revision {
+ /* eDP version 1.1 or lower */
+ EDP_REVISION_11 = 0x00,
+ /* eDP version 1.2 */
+ EDP_REVISION_12 = 0x01,
+ /* eDP version 1.3 */
+ EDP_REVISION_13 = 0x02
+};
+
+enum {
+ LINK_RATE_REF_FREQ_IN_KHZ = 27000 /*27MHz*/
+};
+
+enum link_training_result {
+ LINK_TRAINING_SUCCESS,
+ LINK_TRAINING_CR_FAIL,
+ /* CR DONE bit is cleared during EQ step */
+ LINK_TRAINING_EQ_FAIL_CR,
+ /* other failure during EQ step */
+ LINK_TRAINING_EQ_FAIL_EQ,
+};
+
+struct link_training_settings {
+ struct dc_link_settings link_settings;
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+ bool allow_invalid_msa_timing_param;
+};
+
+enum hw_dp_training_pattern {
+ HW_DP_TRAINING_PATTERN_1 = 0,
+ HW_DP_TRAINING_PATTERN_2,
+ HW_DP_TRAINING_PATTERN_3,
+ HW_DP_TRAINING_PATTERN_4
+};
+
+/* TODO: Move this enum to the test harness */
+/* Test patterns*/
+enum dp_test_pattern {
+ /* Input data is passed through the Scrambler
+ * and 8b10b Encoder straight to the output */
+ DP_TEST_PATTERN_VIDEO_MODE = 0,
+
+ /* phy test patterns*/
+ DP_TEST_PATTERN_PHY_PATTERN_BEGIN,
+ DP_TEST_PATTERN_D102 = DP_TEST_PATTERN_PHY_PATTERN_BEGIN,
+ DP_TEST_PATTERN_SYMBOL_ERROR,
+ DP_TEST_PATTERN_PRBS7,
+ DP_TEST_PATTERN_80BIT_CUSTOM,
+ DP_TEST_PATTERN_CP2520_1,
+ DP_TEST_PATTERN_CP2520_2,
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE = DP_TEST_PATTERN_CP2520_2,
+ DP_TEST_PATTERN_CP2520_3,
+
+ /* Link Training Patterns */
+ DP_TEST_PATTERN_TRAINING_PATTERN1,
+ DP_TEST_PATTERN_TRAINING_PATTERN2,
+ DP_TEST_PATTERN_TRAINING_PATTERN3,
+ DP_TEST_PATTERN_TRAINING_PATTERN4,
+ DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_TRAINING_PATTERN4,
+
+ /* link test patterns*/
+ DP_TEST_PATTERN_COLOR_SQUARES,
+ DP_TEST_PATTERN_COLOR_SQUARES_CEA,
+ DP_TEST_PATTERN_VERTICAL_BARS,
+ DP_TEST_PATTERN_HORIZONTAL_BARS,
+ DP_TEST_PATTERN_COLOR_RAMP,
+
+ /* audio test patterns*/
+ DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED,
+ DP_TEST_PATTERN_AUDIO_SAWTOOTH,
+
+ DP_TEST_PATTERN_UNSUPPORTED
+};
+
+enum dp_panel_mode {
+ /* not required */
+ DP_PANEL_MODE_DEFAULT,
+ /* standard mode for eDP */
+ DP_PANEL_MODE_EDP,
+ /* external chips specific settings */
+ DP_PANEL_MODE_SPECIAL
+};
+
+/* DPCD_ADDR_TRAINING_LANEx_SET registers value */
+union dpcd_training_lane_set {
+ struct {
+#if defined(LITTLEENDIAN_CPU)
+ uint8_t VOLTAGE_SWING_SET:2;
+ uint8_t MAX_SWING_REACHED:1;
+ uint8_t PRE_EMPHASIS_SET:2;
+ uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+ /* following is reserved in DP 1.1 */
+ uint8_t POST_CURSOR2_SET:2;
+#elif defined(BIGENDIAN_CPU)
+ uint8_t POST_CURSOR2_SET:2;
+ uint8_t MAX_PRE_EMPHASIS_REACHED:1;
+ uint8_t PRE_EMPHASIS_SET:2;
+ uint8_t MAX_SWING_REACHED:1;
+ uint8_t VOLTAGE_SWING_SET:2;
+#else
+ #error ARCH not defined!
+#endif
+ } bits;
+
+ uint8_t raw;
+};
+
+
+/* DP MST stream allocation (payload bandwidth number) */
+struct dp_mst_stream_allocation {
+ uint8_t vcp_id;
+ /* number of slots required for the DP stream in
+ * transport packet */
+ uint8_t slot_count;
+};
+
+/* DP MST stream allocation table */
+struct dp_mst_stream_allocation_table {
+ /* number of DP video streams */
+ int stream_count;
+ /* array of stream allocations */
+ struct dp_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
+};
+
+#endif /*__DAL_LINK_SERVICE_TYPES_H__*/
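
A minimal sketch of packing one DPCD TRAINING_LANEx_SET byte with the union above (illustrative only, not part of the patch; it assumes the endianness macro expected by the header is defined by the build):

static uint8_t build_training_lane_set(uint8_t swing, uint8_t pre_emphasis,
		bool max_swing, bool max_pre_emphasis)
{
	union dpcd_training_lane_set lane_set = { .raw = 0 };

	lane_set.bits.VOLTAGE_SWING_SET = swing & 0x3;
	lane_set.bits.PRE_EMPHASIS_SET = pre_emphasis & 0x3;
	lane_set.bits.MAX_SWING_REACHED = max_swing ? 1 : 0;
	lane_set.bits.MAX_PRE_EMPHASIS_REACHED = max_pre_emphasis ? 1 : 0;

	/* .raw is the byte that would be written to the DPCD register. */
	return lane_set.raw;
}
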
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
new file mode 100644
index 000000000000..8e1fe70097be
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_INTERFACE_H__
+#define __DAL_LOGGER_INTERFACE_H__
+
+#include "logger_types.h"
+
+struct dc_context;
+struct dc_link;
+struct dc_surface_update;
+struct resource_context;
+struct dc_state;
+
+/*
+ *
+ * DAL logger functionality
+ *
+ */
+
+struct dal_logger *dal_logger_create(struct dc_context *ctx, uint32_t log_mask);
+
+uint32_t dal_logger_destroy(struct dal_logger **logger);
+
+void dm_logger_flush_buffer(struct dal_logger *logger, bool should_warn);
+
+void dm_logger_write(
+ struct dal_logger *logger,
+ enum dc_log_type log_type,
+ const char *msg,
+ ...);
+
+void dm_logger_append(
+ struct log_entry *entry,
+ const char *msg,
+ ...);
+
+void dm_logger_open(
+ struct dal_logger *logger,
+ struct log_entry *entry,
+ enum dc_log_type log_type);
+
+void dm_logger_close(struct log_entry *entry);
+
+void dc_conn_log(struct dc_context *ctx,
+ const struct dc_link *link,
+ uint8_t *hex_data,
+ int hex_data_count,
+ enum dc_log_type event,
+ const char *msg,
+ ...);
+
+void logger_write(struct dal_logger *logger,
+ enum dc_log_type log_type,
+ const char *msg,
+ void *paralist);
+
+void pre_surface_trace(
+ struct dc *dc,
+ const struct dc_plane_state *const *plane_states,
+ int surface_count);
+
+void update_surface_trace(
+ struct dc *dc,
+ const struct dc_surface_update *updates,
+ int surface_count);
+
+void post_surface_trace(struct dc *dc);
+
+void context_timing_trace(
+ struct dc *dc,
+ struct resource_context *res_ctx);
+
+void context_clock_trace(
+ struct dc *dc,
+ struct dc_state *context);
+
+/* Any function which is empty or has an incomplete implementation should be
+ * marked with this macro.
+ * Note that the message will be printed exactly once for every function
+ * it is used in, to avoid repeating the same message. */
+#define DAL_LOGGER_NOT_IMPL(fmt, ...) \
+{ \
+ static bool print_not_impl = true; \
+\
+ if (print_not_impl == true) { \
+ print_not_impl = false; \
+ dm_logger_write(ctx->logger, LOG_WARNING, \
+ "DAL_NOT_IMPL: " fmt, ##__VA_ARGS__); \
+ } \
+}
+
+/******************************************************************************
+ * Convenience macros to save on typing.
+ *****************************************************************************/
+
+#define DC_ERROR(...) \
+ dm_logger_write(dc_ctx->logger, LOG_ERROR, \
+ __VA_ARGS__)
+
+#define DC_SYNC_INFO(...) \
+ dm_logger_write(dc_ctx->logger, LOG_SYNC, \
+ __VA_ARGS__)
+
+/* Connectivity log format:
+ * [time stamp] [drm] [Major_minor] [connector name] message.....
+ * eg:
+ * [ 26.590965] [drm] [Conn_LKTN] [DP-1] HBRx4 pass VS=0, PE=0^
+ * [ 26.881060] [drm] [Conn_Mode] [DP-1] {2560x1080, 2784x1111@185580Khz}^
+ */
+
+#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
+ dc_conn_log(link->ctx, link, hex_data, hex_len, \
+ LOG_EVENT_DETECTION, ##__VA_ARGS__)
+
+#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
+ dc_conn_log(link->ctx, link, hex_data, hex_len, \
+ LOG_EVENT_LINK_LOSS, ##__VA_ARGS__)
+
+#define CONN_MSG_LT(link, ...) \
+ dc_conn_log(link->ctx, link, NULL, 0, \
+ LOG_EVENT_LINK_TRAINING, ##__VA_ARGS__)
+
+#define CONN_MSG_MODE(link, ...) \
+ dc_conn_log(link->ctx, link, NULL, 0, \
+ LOG_EVENT_MODE_SET, ##__VA_ARGS__)
+
+/*
+ * Display Test Next logging
+ */
+#define DTN_INFO_BEGIN() \
+ dm_dtn_log_begin(dc_ctx)
+
+#define DTN_INFO(msg, ...) \
+ dm_dtn_log_append_v(dc_ctx, msg, ##__VA_ARGS__)
+
+#define DTN_INFO_END() \
+ dm_dtn_log_end(dc_ctx)
+
+#define PERFORMANCE_TRACE_START() \
+ unsigned long long perf_trc_start_stmp = dm_get_timestamp(dc->ctx); \
+ unsigned long long perf_trc_start_log_msk = dc->ctx->logger->mask; \
+ unsigned int perf_trc_start_log_flags = dc->ctx->logger->flags.value; \
+ if (dc->debug.performance_trace) {\
+ dm_logger_flush_buffer(dc->ctx->logger, false);\
+ dc->ctx->logger->mask = 1<<LOG_PERF_TRACE;\
+ dc->ctx->logger->flags.bits.ENABLE_CONSOLE = 0;\
+ dc->ctx->logger->flags.bits.ENABLE_BUFFER = 1;\
+ }
+
+#define PERFORMANCE_TRACE_END() do {\
+ unsigned long long perf_trc_end_stmp = dm_get_timestamp(dc->ctx);\
+ if (dc->debug.performance_trace) {\
+ dm_logger_write(dc->ctx->logger, \
+ LOG_PERF_TRACE, \
+ "%s duration: %d ticks\n", __func__,\
+ perf_trc_end_stmp - perf_trc_start_stmp); \
+ if (perf_trc_start_log_msk != 1<<LOG_PERF_TRACE) {\
+ dc->ctx->logger->mask = perf_trc_start_log_msk;\
+ dc->ctx->logger->flags.value = perf_trc_start_log_flags;\
+ dm_logger_flush_buffer(dc->ctx->logger, false);\
+ } \
+ } \
+} while (0)
+
+#endif /* __DAL_LOGGER_INTERFACE_H__ */
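
A minimal sketch of the basic logging call (illustrative only, not part of the patch; the log type and message are arbitrary examples):

static void log_link_training_result(struct dal_logger *logger, bool success)
{
	/* dm_logger_write() takes a log type plus a printf-style format. */
	dm_logger_write(logger, LOG_HW_LINK_TRAINING,
			"link training %s\n", success ? "passed" : "failed");
}
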
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
new file mode 100644
index 000000000000..e2ff8cd423d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_LOGGER_TYPES_H__
+#define __DAL_LOGGER_TYPES_H__
+
+#include "os_types.h"
+
+#define MAX_NAME_LEN 32
+
+struct dal_logger;
+
+enum dc_log_type {
+ LOG_ERROR = 0,
+ LOG_WARNING,
+ LOG_DEBUG,
+ LOG_DC,
+ LOG_DTN,
+ LOG_SURFACE,
+ LOG_HW_HOTPLUG,
+ LOG_HW_LINK_TRAINING,
+ LOG_HW_SET_MODE,
+ LOG_HW_RESUME_S3,
+ LOG_HW_AUDIO,
+ LOG_HW_HPD_IRQ,
+ LOG_MST,
+ LOG_SCALER,
+ LOG_BIOS,
+ LOG_BANDWIDTH_CALCS,
+ LOG_BANDWIDTH_VALIDATION,
+ LOG_I2C_AUX,
+ LOG_SYNC,
+ LOG_BACKLIGHT,
+ LOG_FEATURE_OVERRIDE,
+ LOG_DETECTION_EDID_PARSER,
+ LOG_DETECTION_DP_CAPS,
+ LOG_RESOURCE,
+ LOG_DML,
+ LOG_EVENT_MODE_SET,
+ LOG_EVENT_DETECTION,
+ LOG_EVENT_LINK_TRAINING,
+ LOG_EVENT_LINK_LOSS,
+ LOG_EVENT_UNDERFLOW,
+ LOG_IF_TRACE,
+ LOG_PERF_TRACE,
+
+ LOG_SECTION_TOTAL_COUNT
+};
+
+#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
+ (1 << LOG_DETECTION_EDID_PARSER))
+
+#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
+ (1 << LOG_WARNING) | \
+ (1 << LOG_EVENT_MODE_SET) | \
+ (1 << LOG_EVENT_DETECTION) | \
+ (1 << LOG_EVENT_LINK_TRAINING) | \
+ (1 << LOG_EVENT_LINK_LOSS) | \
+ (1 << LOG_EVENT_UNDERFLOW) | \
+ (1 << LOG_RESOURCE) | \
+ (1 << LOG_FEATURE_OVERRIDE) | \
+ (1 << LOG_DETECTION_EDID_PARSER) | \
+ (1 << LOG_DC) | \
+ (1 << LOG_HW_HOTPLUG) | \
+ (1 << LOG_HW_SET_MODE) | \
+ (1 << LOG_HW_RESUME_S3) | \
+ (1 << LOG_HW_HPD_IRQ) | \
+ (1 << LOG_SYNC) | \
+ (1 << LOG_BANDWIDTH_VALIDATION) | \
+ (1 << LOG_MST) | \
+ (1 << LOG_DETECTION_DP_CAPS) | \
+ (1 << LOG_BACKLIGHT) | \
+ (1 << LOG_I2C_AUX) | \
+ (1 << LOG_IF_TRACE) | \
+ (1 << LOG_DTN)) /* | \
+ (1 << LOG_DEBUG) | \
+ (1 << LOG_BIOS) | \
+ (1 << LOG_SURFACE) | \
+ (1 << LOG_SCALER) | \
+ (1 << LOG_DML) | \
+ (1 << LOG_HW_LINK_TRAINING) | \
+ (1 << LOG_HW_AUDIO)| \
+ (1 << LOG_BANDWIDTH_CALCS)*/
+
+union logger_flags {
+ struct {
+ uint32_t ENABLE_CONSOLE:1; /* Print to console */
+ uint32_t ENABLE_BUFFER:1; /* Print to buffer */
+ uint32_t RESERVED:30;
+ } bits;
+ uint32_t value;
+};
+
+struct log_entry {
+ struct dal_logger *logger;
+ enum dc_log_type type;
+
+ char *buf;
+ uint32_t buf_offset;
+ uint32_t max_buf_bytes;
+};
+
+/**
+ * Structure for enumerating log types
+ */
+struct dc_log_type_info {
+ enum dc_log_type type;
+ char name[MAX_NAME_LEN];
+};
+
+/* Structure for keeping track of offsets, buffer, etc */
+
+#define DAL_LOGGER_BUFFER_MAX_SIZE 2048
+
+/* The connectivity log needs to output the EDID, which needs at least 256x3
+ * bytes, so the log line size is set to 896 to meet that requirement.
+ */
+#define LOG_MAX_LINE_SIZE 896
+
+struct dal_logger {
+
+ /* How far into the circular buffer DSAT has read.
+ * The read offset should never cross the write offset. Write \0's to
+ * read data just to be sure?
+ */
+ uint32_t buffer_read_offset;
+
+ /* How far into the circular buffer we have written
+ * Write offset should never cross read offset
+ */
+ uint32_t buffer_write_offset;
+
+ uint32_t open_count;
+
+ char *log_buffer; /* Pointer to malloc'ed buffer */
+ uint32_t log_buffer_size; /* Size of circular buffer */
+
+ uint32_t mask; /* bitmask of enabled log types, one bit per dc_log_type */
+
+ union logger_flags flags;
+ struct dc_context *ctx;
+};
+
+#endif /* __DAL_LOGGER_TYPES_H__ */
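
A minimal sketch showing how the mask field relates to dc_log_type (illustrative only, not part of the patch):

static bool log_type_enabled(const struct dal_logger *logger,
		enum dc_log_type type)
{
	/* 'mask' holds one bit per dc_log_type, as in DC_DEFAULT_LOG_MASK. */
	return (logger->mask & (1 << type)) != 0;
}
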
diff --git a/drivers/gpu/drm/amd/display/include/set_mode_types.h b/drivers/gpu/drm/amd/display/include/set_mode_types.h
new file mode 100644
index 000000000000..fee2b6ffcfc1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/set_mode_types.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_SET_MODE_TYPES_H__
+#define __DAL_SET_MODE_TYPES_H__
+
+#include "dc_types.h"
+#include <linux/hdmi.h>
+
+/* Info frame packet status */
+enum info_frame_flag {
+ INFO_PACKET_PACKET_INVALID = 0,
+ INFO_PACKET_PACKET_VALID = 1,
+ INFO_PACKET_PACKET_RESET = 2,
+ INFO_PACKET_PACKET_UPDATE_SCAN_TYPE = 8
+};
+
+struct hdmi_info_frame_header {
+ uint8_t info_frame_type;
+ uint8_t version;
+ uint8_t length;
+};
+
+#pragma pack(push)
+#pragma pack(1)
+
+struct info_packet_raw_data {
+ uint8_t hb0;
+ uint8_t hb1;
+ uint8_t hb2;
+ uint8_t sb[28]; /* sb0~sb27 */
+};
+
+union hdmi_info_packet {
+ struct avi_info_frame {
+ struct hdmi_info_frame_header header;
+
+ uint8_t CHECK_SUM:8;
+
+ uint8_t S0_S1:2;
+ uint8_t B0_B1:2;
+ uint8_t A0:1;
+ uint8_t Y0_Y1_Y2:3;
+
+ uint8_t R0_R3:4;
+ uint8_t M0_M1:2;
+ uint8_t C0_C1:2;
+
+ uint8_t SC0_SC1:2;
+ uint8_t Q0_Q1:2;
+ uint8_t EC0_EC2:3;
+ uint8_t ITC:1;
+
+ uint8_t VIC0_VIC7:8;
+
+ uint8_t PR0_PR3:4;
+ uint8_t CN0_CN1:2;
+ uint8_t YQ0_YQ1:2;
+
+ uint16_t bar_top;
+ uint16_t bar_bottom;
+ uint16_t bar_left;
+ uint16_t bar_right;
+
+ uint8_t reserved[14];
+ } bits;
+
+ struct info_packet_raw_data packet_raw_data;
+};
+
+struct info_packet {
+ enum info_frame_flag flags;
+ union hdmi_info_packet info_packet_hdmi;
+};
+
+struct info_frame {
+ struct info_packet avi_info_packet;
+ struct info_packet gamut_packet;
+ struct info_packet vendor_info_packet;
+ struct info_packet spd_info_packet;
+};
+
+#pragma pack(pop)
+
+#endif /* __DAL_SET_MODE_TYPES_H__ */
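
A minimal sketch of filling the AVI infoframe header with the union above (illustrative only, not part of the patch; type 0x82 / version 2 / length 13 are the standard HDMI AVI InfoFrame values, used here purely as an example):

static void init_avi_info_packet(struct info_packet *packet)
{
	packet->flags = INFO_PACKET_PACKET_VALID;

	/* HDMI AVI InfoFrame: type 0x82, version 2, 13-byte payload. */
	packet->info_packet_hdmi.bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI;
	packet->info_packet_hdmi.bits.header.version = 2;
	packet->info_packet_hdmi.bits.header.length = 13;
}
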
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
new file mode 100644
index 000000000000..b5ebde642207
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_SIGNAL_TYPES_H__
+#define __DC_SIGNAL_TYPES_H__
+
+enum signal_type {
+ SIGNAL_TYPE_NONE = 0L, /* no signal */
+ SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),
+ SIGNAL_TYPE_DVI_DUAL_LINK = (1 << 1),
+ SIGNAL_TYPE_HDMI_TYPE_A = (1 << 2),
+ SIGNAL_TYPE_LVDS = (1 << 3),
+ SIGNAL_TYPE_RGB = (1 << 4),
+ SIGNAL_TYPE_DISPLAY_PORT = (1 << 5),
+ SIGNAL_TYPE_DISPLAY_PORT_MST = (1 << 6),
+ SIGNAL_TYPE_EDP = (1 << 7),
+ SIGNAL_TYPE_VIRTUAL = (1 << 9), /* Virtual Display */
+};
+
+/* helper functions for signal type manipulation */
+static inline bool dc_is_hdmi_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
+}
+
+static inline bool dc_is_dp_sst_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_EDP);
+}
+
+static inline bool dc_is_dp_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_EDP ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+}
+
+static inline bool dc_is_embedded_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_EDP || signal == SIGNAL_TYPE_LVDS);
+}
+
+static inline bool dc_is_dvi_signal(enum signal_type signal)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool dc_is_dvi_single_link_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK);
+}
+
+static inline bool dc_is_dual_link_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DVI_DUAL_LINK);
+}
+
+static inline bool dc_is_audio_capable_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ dc_is_hdmi_signal(signal));
+}
+
+#endif
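
A minimal sketch combining the helper predicates above (illustrative only, not part of the patch):

static const char *describe_signal(enum signal_type signal)
{
	if (dc_is_dp_signal(signal))
		return dc_is_embedded_signal(signal) ? "eDP" : "DisplayPort";
	if (dc_is_hdmi_signal(signal))
		return "HDMI";
	if (dc_is_dvi_signal(signal))
		return "DVI";
	return "other";
}
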
diff --git a/drivers/gpu/drm/amd/display/include/vector.h b/drivers/gpu/drm/amd/display/include/vector.h
new file mode 100644
index 000000000000..8233b7c22a07
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/include/vector.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_VECTOR_H__
+#define __DAL_VECTOR_H__
+
+struct vector {
+ uint8_t *container;
+ uint32_t struct_size;
+ uint32_t count;
+ uint32_t capacity;
+ struct dc_context *ctx;
+};
+
+bool dal_vector_construct(
+ struct vector *vector,
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size);
+
+struct vector *dal_vector_create(
+ struct dc_context *ctx,
+ uint32_t capacity,
+ uint32_t struct_size);
+
+/* 'initial_value' is optional. If initial_value is not supplied,
+ * each "structure" in the vector will contain zeros by default. */
+struct vector *dal_vector_presized_create(
+ struct dc_context *ctx,
+ uint32_t size,
+ void *initial_value,
+ uint32_t struct_size);
+
+void dal_vector_destruct(
+ struct vector *vector);
+
+void dal_vector_destroy(
+ struct vector **vector);
+
+uint32_t dal_vector_get_count(
+ const struct vector *vector);
+
+/* dal_vector_insert_at
+ * reallocates the container if necessary,
+ * then shifts the items on the right and inserts the new one.
+ * Returns whether the container was modified.
+ * Does not check that the index belongs to the container,
+ * since the function is private and the index is calculated
+ * either by the function itself or as get_count+1 */
+bool dal_vector_insert_at(
+ struct vector *vector,
+ const void *what,
+ uint32_t position);
+
+bool dal_vector_append(
+ struct vector *vector,
+ const void *item);
+
+/* operator[] */
+void *dal_vector_at_index(
+ const struct vector *vector,
+ uint32_t index);
+
+void dal_vector_set_at_index(
+ const struct vector *vector,
+ const void *what,
+ uint32_t index);
+
+/* create a clone (copy) of a vector */
+struct vector *dal_vector_clone(
+ const struct vector *vector_other);
+
+/* dal_vector_remove_at_index
+ * Shifts the elements to the right of the remove position left by one,
+ * removing the element at that position by overwriting it */
+bool dal_vector_remove_at_index(
+ struct vector *vector,
+ uint32_t index);
+
+uint32_t dal_vector_capacity(const struct vector *vector);
+
+bool dal_vector_reserve(struct vector *vector, uint32_t capacity);
+
+void dal_vector_clear(struct vector *vector);
+
+/***************************************************************************
+ * Macro definitions of TYPE-SAFE versions of vector set/get functions.
+ ***************************************************************************/
+
+#define DAL_VECTOR_INSERT_AT(vector_type, type_t) \
+ static bool vector_type##_vector_insert_at( \
+ struct vector *vector, \
+ type_t what, \
+ uint32_t position) \
+{ \
+ return dal_vector_insert_at(vector, what, position); \
+}
+
+#define DAL_VECTOR_APPEND(vector_type, type_t) \
+ static bool vector_type##_vector_append( \
+ struct vector *vector, \
+ type_t item) \
+{ \
+ return dal_vector_append(vector, item); \
+}
+
+/* Note: "type_t" is the ONLY token accepted by "checkpatch.pl" and by
+ * "checkcommit" as *return type*.
+ * For uniformity reasons "type_t" is used for all type-safe macro
+ * definitions here. */
+#define DAL_VECTOR_AT_INDEX(vector_type, type_t) \
+ static type_t vector_type##_vector_at_index( \
+ const struct vector *vector, \
+ uint32_t index) \
+{ \
+ return dal_vector_at_index(vector, index); \
+}
+
+#define DAL_VECTOR_SET_AT_INDEX(vector_type, type_t) \
+ static void vector_type##_vector_set_at_index( \
+ const struct vector *vector, \
+ type_t what, \
+ uint32_t index) \
+{ \
+ dal_vector_set_at_index(vector, what, index); \
+}
+
+#endif /* __DAL_VECTOR_H__ */
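
A minimal sketch of instantiating the type-safe wrappers for a caller-defined element type (illustrative only, not part of the patch; 'hypothetical_item' is a made-up name):

struct hypothetical_item {
	uint32_t value;
};

/* Expands to static wrappers named hypothetical_item_vector_append() and
 * hypothetical_item_vector_at_index() whose argument/return types use the
 * element pointer type instead of void *. */
DAL_VECTOR_APPEND(hypothetical_item, struct hypothetical_item *)
DAL_VECTOR_AT_INDEX(hypothetical_item, struct hypothetical_item *)
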
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/Makefile b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
new file mode 100644
index 000000000000..db8e0ff6d7a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/freesync/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the 'freesync' sub-module of DAL.
+#
+
+FREESYNC = freesync.o
+
+AMD_DAL_FREESYNC = $(addprefix $(AMDDALPATH)/modules/freesync/,$(FREESYNC))
+#$(info ************ DAL-FREE SYNC_MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_FREESYNC)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
new file mode 100644
index 000000000000..4d7db4aa28e0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -0,0 +1,1483 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "mod_freesync.h"
+#include "core_types.h"
+
+#define MOD_FREESYNC_MAX_CONCURRENT_STREAMS 32
+
+/* Refresh rate ramp at a fixed rate of 65 Hz/second */
+#define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65)
+/* Number of elements in the render times cache array */
+#define RENDER_TIMES_MAX_COUNT 20
+/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
+#define BTR_EXIT_MARGIN 2000
+/* Number of consecutive frames to check before entering/exiting fixed refresh */
+#define FIXED_REFRESH_ENTER_FRAME_COUNT 5
+#define FIXED_REFRESH_EXIT_FRAME_COUNT 5
+
+#define FREESYNC_REGISTRY_NAME "freesync_v1"
+
+#define FREESYNC_NO_STATIC_FOR_EXTERNAL_DP_REGKEY "DalFreeSyncNoStaticForExternalDp"
+
+#define FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY "DalFreeSyncNoStaticForInternal"
+
+struct gradual_static_ramp {
+ bool ramp_is_active;
+ bool ramp_direction_is_up;
+ unsigned int ramp_current_frame_duration_in_ns;
+};
+
+struct time_cache {
+ /* video (48Hz feature) related */
+ unsigned int update_duration_in_ns;
+
+ /* BTR/fixed refresh related */
+ unsigned int prev_time_stamp_in_us;
+
+ unsigned int min_render_time_in_us;
+ unsigned int max_render_time_in_us;
+
+ unsigned int render_times_index;
+ unsigned int render_times[RENDER_TIMES_MAX_COUNT];
+};
+
+struct below_the_range {
+ bool btr_active;
+ bool program_btr;
+
+ unsigned int mid_point_in_us;
+
+ unsigned int inserted_frame_duration_in_us;
+ unsigned int frames_to_insert;
+ unsigned int frame_counter;
+};
+
+struct fixed_refresh {
+ bool fixed_active;
+ bool program_fixed;
+ unsigned int frame_counter;
+};
+
+struct freesync_range {
+ unsigned int min_refresh;
+ unsigned int max_frame_duration;
+ unsigned int vmax;
+
+ unsigned int max_refresh;
+ unsigned int min_frame_duration;
+ unsigned int vmin;
+};
+
+struct freesync_state {
+ bool fullscreen;
+ bool static_screen;
+ bool video;
+
+ unsigned int nominal_refresh_rate_in_micro_hz;
+ bool windowed_fullscreen;
+
+ struct time_cache time;
+
+ struct gradual_static_ramp static_ramp;
+ struct below_the_range btr;
+ struct fixed_refresh fixed_refresh;
+ struct freesync_range freesync_range;
+};
+
+struct freesync_entity {
+ struct dc_stream_state *stream;
+ struct mod_freesync_caps *caps;
+ struct freesync_state state;
+ struct mod_freesync_user_enable user_enable;
+};
+
+struct freesync_registry_options {
+ bool drr_external_supported;
+ bool drr_internal_supported;
+};
+
+struct core_freesync {
+ struct mod_freesync public;
+ struct dc *dc;
+ struct freesync_entity *map;
+ int num_entities;
+ struct freesync_registry_options opts;
+};
+
+#define MOD_FREESYNC_TO_CORE(mod_freesync)\
+ container_of(mod_freesync, struct core_freesync, public)
+
+static bool check_dc_support(const struct dc *dc)
+{
+ if (dc->stream_funcs.adjust_vmin_vmax == NULL)
+ return false;
+
+ return true;
+}
+
+struct mod_freesync *mod_freesync_create(struct dc *dc)
+{
+ struct core_freesync *core_freesync =
+ kzalloc(sizeof(struct core_freesync), GFP_KERNEL);
+
+
+ struct persistent_data_flag flag;
+
+ int i, data = 0;
+
+ if (core_freesync == NULL)
+ goto fail_alloc_context;
+
+ core_freesync->map = kzalloc(sizeof(struct freesync_entity) * MOD_FREESYNC_MAX_CONCURRENT_STREAMS,
+ GFP_KERNEL);
+
+ if (core_freesync->map == NULL)
+ goto fail_alloc_map;
+
+ for (i = 0; i < MOD_FREESYNC_MAX_CONCURRENT_STREAMS; i++)
+ core_freesync->map[i].stream = NULL;
+
+ core_freesync->num_entities = 0;
+
+ if (dc == NULL)
+ goto fail_construct;
+
+ core_freesync->dc = dc;
+
+ if (!check_dc_support(dc))
+ goto fail_construct;
+
+ /* Create initial module folder in registry for freesync enable data */
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+ dm_write_persistent_data(dc->ctx, NULL, FREESYNC_REGISTRY_NAME,
+ NULL, NULL, 0, &flag);
+ flag.save_per_edid = false;
+ flag.save_per_link = false;
+
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_NO_STATIC_FOR_INTERNAL_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.drr_internal_supported =
+ (data & 1) ? false : true;
+ }
+
+ if (dm_read_persistent_data(dc->ctx, NULL, NULL,
+ FREESYNC_NO_STATIC_FOR_EXTERNAL_DP_REGKEY,
+ &data, sizeof(data), &flag)) {
+ core_freesync->opts.drr_external_supported =
+ (data & 1) ? false : true;
+ }
+
+ return &core_freesync->public;
+
+fail_construct:
+ kfree(core_freesync->map);
+
+fail_alloc_map:
+ kfree(core_freesync);
+
+fail_alloc_context:
+ return NULL;
+}
+
+void mod_freesync_destroy(struct mod_freesync *mod_freesync)
+{
+ if (mod_freesync != NULL) {
+ int i;
+ struct core_freesync *core_freesync =
+ MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (i = 0; i < core_freesync->num_entities; i++)
+ if (core_freesync->map[i].stream)
+ dc_stream_release(core_freesync->map[i].stream);
+
+ kfree(core_freesync->map);
+
+ kfree(core_freesync);
+ }
+}
+
+/* Given a specific dc_stream* this function finds its entry
+ * in core_freesync->map and returns the corresponding index
+ */
+static unsigned int map_index_from_stream(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream)
+{
+ unsigned int index = 0;
+
+ for (index = 0; index < core_freesync->num_entities; index++) {
+ if (core_freesync->map[index].stream == stream) {
+ return index;
+ }
+ }
+ /* Could not find stream requested */
+ ASSERT(false);
+ return index;
+}
+
+bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream, struct mod_freesync_caps *caps)
+{
+ struct dc *dc = NULL;
+ struct core_freesync *core_freesync = NULL;
+ int persistent_freesync_enable = 0;
+ struct persistent_data_flag flag;
+ unsigned int nom_refresh_rate_uhz;
+ unsigned long long temp;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ dc = core_freesync->dc;
+
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+
+ if (core_freesync->num_entities < MOD_FREESYNC_MAX_CONCURRENT_STREAMS) {
+
+ dc_stream_retain(stream);
+
+ temp = stream->timing.pix_clk_khz;
+ temp *= 1000ULL * 1000ULL * 1000ULL;
+ temp = div_u64(temp, stream->timing.h_total);
+ temp = div_u64(temp, stream->timing.v_total);
+
+ nom_refresh_rate_uhz = (unsigned int) temp;
+
+ core_freesync->map[core_freesync->num_entities].stream = stream;
+ core_freesync->map[core_freesync->num_entities].caps = caps;
+
+ core_freesync->map[core_freesync->num_entities].state.
+ fullscreen = false;
+ core_freesync->map[core_freesync->num_entities].state.
+ static_screen = false;
+ core_freesync->map[core_freesync->num_entities].state.
+ video = false;
+ core_freesync->map[core_freesync->num_entities].state.time.
+ update_duration_in_ns = 0;
+ core_freesync->map[core_freesync->num_entities].state.
+ static_ramp.ramp_is_active = false;
+
+ /* get persistent data from registry */
+ if (dm_read_persistent_data(dc->ctx, stream->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable", &persistent_freesync_enable,
+ sizeof(int), &flag)) {
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_gaming =
+ (persistent_freesync_enable & 1) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_static =
+ (persistent_freesync_enable & 2) ? true : false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_video =
+ (persistent_freesync_enable & 4) ? true : false;
+ } else {
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_gaming = false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_static = false;
+ core_freesync->map[core_freesync->num_entities].user_enable.
+ enable_for_video = false;
+ }
+
+ if (caps->supported &&
+ nom_refresh_rate_uhz >= caps->min_refresh_in_micro_hz &&
+ nom_refresh_rate_uhz <= caps->max_refresh_in_micro_hz)
+ stream->ignore_msa_timing_param = 1;
+
+ core_freesync->num_entities++;
+ return true;
+ }
+ return false;
+}
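
The nominal refresh rate computed in mod_freesync_add_stream() above is pixel_clock / (h_total * v_total), carried in micro-Hz to keep precision. A worked numeric sketch of the same arithmetic (illustrative only, not part of the patch; the 1080p60 timing values are an example):

static unsigned int nominal_refresh_uhz_example(void)
{
	unsigned long long temp = 148500;	/* pix_clk_khz for a typical 1080p60 timing */

	temp *= 1000ULL * 1000ULL * 1000ULL;	/* kHz -> micro-Hz numerator */
	temp = div_u64(temp, 2200);		/* h_total */
	temp = div_u64(temp, 1125);		/* v_total */

	/* 148500 * 10^9 / 2200 / 1125 = 60,000,000 uHz, i.e. exactly 60 Hz */
	return (unsigned int)temp;
}
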
+
+bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream)
+{
+ int i = 0;
+ struct core_freesync *core_freesync = NULL;
+ unsigned int index = 0;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ dc_stream_release(core_freesync->map[index].stream);
+ core_freesync->map[index].stream = NULL;
+ /* To remove this entity, shift everything after down */
+ for (i = index; i < core_freesync->num_entities - 1; i++)
+ core_freesync->map[i] = core_freesync->map[i + 1];
+ core_freesync->num_entities--;
+ return true;
+}
+
+static void update_stream_freesync_context(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream)
+{
+ unsigned int index;
+ struct freesync_context *ctx;
+
+ ctx = &stream->freesync_ctx;
+
+ index = map_index_from_stream(core_freesync, stream);
+
+ ctx->supported = core_freesync->map[index].caps->supported;
+ ctx->enabled = (core_freesync->map[index].user_enable.enable_for_gaming ||
+ core_freesync->map[index].user_enable.enable_for_video ||
+ core_freesync->map[index].user_enable.enable_for_static);
+ ctx->active = (core_freesync->map[index].state.fullscreen ||
+ core_freesync->map[index].state.video ||
+ core_freesync->map[index].state.static_ramp.ramp_is_active);
+ ctx->min_refresh_in_micro_hz =
+ core_freesync->map[index].caps->min_refresh_in_micro_hz;
+ ctx->nominal_refresh_in_micro_hz = core_freesync->
+ map[index].state.nominal_refresh_rate_in_micro_hz;
+
+}
+
+static void update_stream(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream)
+{
+ unsigned int index = map_index_from_stream(core_freesync, stream);
+ if (core_freesync->map[index].caps->supported) {
+ stream->ignore_msa_timing_param = 1;
+ update_stream_freesync_context(core_freesync, stream);
+ }
+}
+
+static void calc_freesync_range(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream,
+ struct freesync_state *state,
+ unsigned int min_refresh_in_uhz,
+ unsigned int max_refresh_in_uhz)
+{
+ unsigned int min_frame_duration_in_ns = 0, max_frame_duration_in_ns = 0;
+ unsigned int index = map_index_from_stream(core_freesync, stream);
+ uint32_t vtotal = stream->timing.v_total;
+
+ if ((min_refresh_in_uhz == 0) || (max_refresh_in_uhz == 0)) {
+ state->freesync_range.min_refresh =
+ state->nominal_refresh_rate_in_micro_hz;
+ state->freesync_range.max_refresh =
+ state->nominal_refresh_rate_in_micro_hz;
+
+ state->freesync_range.max_frame_duration = 0;
+ state->freesync_range.min_frame_duration = 0;
+
+ state->freesync_range.vmax = vtotal;
+ state->freesync_range.vmin = vtotal;
+
+ return;
+ }
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ max_refresh_in_uhz)));
+ max_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ min_refresh_in_uhz)));
+
+ state->freesync_range.min_refresh = min_refresh_in_uhz;
+ state->freesync_range.max_refresh = max_refresh_in_uhz;
+
+ state->freesync_range.max_frame_duration = max_frame_duration_in_ns;
+ state->freesync_range.min_frame_duration = min_frame_duration_in_ns;
+
+ state->freesync_range.vmax = div64_u64(div64_u64(((unsigned long long)(
+ max_frame_duration_in_ns) * stream->timing.pix_clk_khz),
+ stream->timing.h_total), 1000000);
+ state->freesync_range.vmin = div64_u64(div64_u64(((unsigned long long)(
+ min_frame_duration_in_ns) * stream->timing.pix_clk_khz),
+ stream->timing.h_total), 1000000);
+
+ /* vmin/vmax cannot be less than vtotal */
+ if (state->freesync_range.vmin < vtotal) {
+ /* Error of 1 is permissible */
+ ASSERT((state->freesync_range.vmin + 1) >= vtotal);
+ state->freesync_range.vmin = vtotal;
+ }
+
+ if (state->freesync_range.vmax < vtotal) {
+ /* Error of 1 is permissible */
+ ASSERT((state->freesync_range.vmax + 1) >= vtotal);
+ state->freesync_range.vmax = vtotal;
+ }
+
+ /* Determine whether BTR can be supported */
+ if (max_frame_duration_in_ns >=
+ 2 * min_frame_duration_in_ns)
+ core_freesync->map[index].caps->btr_supported = true;
+ else
+ core_freesync->map[index].caps->btr_supported = false;
+
+ /* Cache the time variables */
+ state->time.max_render_time_in_us =
+ max_frame_duration_in_ns / 1000;
+ state->time.min_render_time_in_us =
+ min_frame_duration_in_ns / 1000;
+ state->btr.mid_point_in_us =
+ (max_frame_duration_in_ns +
+ min_frame_duration_in_ns) / 2000;
+}
+
+static void calc_v_total_from_duration(struct dc_stream_state *stream,
+ unsigned int duration_in_ns, int *v_total_nominal)
+{
+ *v_total_nominal = div64_u64(div64_u64(((unsigned long long)(
+ duration_in_ns) * stream->timing.pix_clk_khz),
+ stream->timing.h_total), 1000000);
+}
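
calc_v_total_from_duration() above inverts the same relationship: v_total = frame_duration_ns * pix_clk_khz / h_total / 10^6. A worked numeric sketch (illustrative only, not part of the patch; same example timing as above):

static int v_total_from_duration_example(void)
{
	/* Same made-up 1080p timing: pix_clk_khz = 148500, h_total = 2200. */
	unsigned int duration_in_ns = 20000000;	/* 20 ms frame -> 50 Hz */

	/* 20,000,000 * 148,500 / 2,200 / 1,000,000 = 1350 lines */
	return (int)div64_u64(div64_u64(
			(unsigned long long)duration_in_ns * 148500, 2200),
			1000000);
}
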
+
+static void calc_v_total_for_static_ramp(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream,
+ unsigned int index, int *v_total)
+{
+ unsigned int frame_duration = 0;
+
+ struct gradual_static_ramp *static_ramp_variables =
+ &core_freesync->map[index].state.static_ramp;
+
+ /* Calc ratio between new and current frame duration with 3 digits of precision */
+ unsigned int frame_duration_ratio = div64_u64(1000000,
+ (1000 + div64_u64(((unsigned long long)(
+ STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME) *
+ static_ramp_variables->ramp_current_frame_duration_in_ns),
+ 1000000000)));
+
+ /* Calculate delta between new and current frame duration in ns */
+ unsigned int frame_duration_delta = div64_u64(((unsigned long long)(
+ static_ramp_variables->ramp_current_frame_duration_in_ns) *
+ (1000 - frame_duration_ratio)), 1000);
+
+ /* Adjust frame duration delta based on ratio between current and
+ * standard frame duration (frame duration at 60 Hz refresh rate).
+ */
+ unsigned int ramp_rate_interpolated = div64_u64(((unsigned long long)(
+ frame_duration_delta) * static_ramp_variables->
+ ramp_current_frame_duration_in_ns), 16666666);
+
+ /* Going to a higher refresh rate (lower frame duration) */
+ if (static_ramp_variables->ramp_direction_is_up) {
+ /* reduce frame duration */
+ static_ramp_variables->ramp_current_frame_duration_in_ns -=
+ ramp_rate_interpolated;
+
+ /* min frame duration */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ core_freesync->map[index].state.
+ nominal_refresh_rate_in_micro_hz)));
+
+ /* adjust for frame duration below min */
+ if (static_ramp_variables->ramp_current_frame_duration_in_ns <=
+ frame_duration) {
+
+ static_ramp_variables->ramp_is_active = false;
+ static_ramp_variables->
+ ramp_current_frame_duration_in_ns =
+ frame_duration;
+ }
+ /* Going to a lower refresh rate (larger frame duration) */
+ } else {
+ /* increase frame duration */
+ static_ramp_variables->ramp_current_frame_duration_in_ns +=
+ ramp_rate_interpolated;
+
+ /* max frame duration */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ core_freesync->map[index].caps->min_refresh_in_micro_hz)));
+
+ /* adjust for frame duration above max */
+ if (static_ramp_variables->ramp_current_frame_duration_in_ns >=
+ frame_duration) {
+
+ static_ramp_variables->ramp_is_active = false;
+ static_ramp_variables->
+ ramp_current_frame_duration_in_ns =
+ frame_duration;
+ }
+ }
+
+ calc_v_total_from_duration(stream, static_ramp_variables->
+ ramp_current_frame_duration_in_ns, v_total);
+}
+
+static void reset_freesync_state_variables(struct freesync_state* state)
+{
+ state->static_ramp.ramp_is_active = false;
+ if (state->nominal_refresh_rate_in_micro_hz)
+ state->static_ramp.ramp_current_frame_duration_in_ns =
+ ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ state->nominal_refresh_rate_in_micro_hz)));
+
+ state->btr.btr_active = false;
+ state->btr.frame_counter = 0;
+ state->btr.frames_to_insert = 0;
+ state->btr.inserted_frame_duration_in_us = 0;
+ state->btr.program_btr = false;
+
+ state->fixed_refresh.fixed_active = false;
+ state->fixed_refresh.program_fixed = false;
+}
+/*
+ * Sets freesync mode on a stream depending on current freesync state.
+ */
+static bool set_freesync_on_streams(struct core_freesync *core_freesync,
+ struct dc_stream_state **streams, int num_streams)
+{
+ int v_total_nominal = 0, v_total_min = 0, v_total_max = 0;
+ unsigned int stream_idx, map_index = 0;
+ struct freesync_state *state;
+
+ if (num_streams == 0 || streams == NULL || num_streams > 1)
+ return false;
+
+ for (stream_idx = 0; stream_idx < num_streams; stream_idx++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_idx]);
+
+ state = &core_freesync->map[map_index].state;
+
+ if (core_freesync->map[map_index].caps->supported) {
+
+ /* Fullscreen has the topmost priority. If the
+ * fullscreen bit is set, we are in a fullscreen
+ * application where it should not matter if it is
+ * static screen. We should not check the static_screen
+ * or video bit.
+ *
+ * Special cases of fullscreen include btr and fixed
+ * refresh. BTR is programmed on every flip and involves
+ * programming the full range right before the last inserted frame.
+ * However, we do not want to program the full freesync range
+ * when fixed refresh is active, because we only program
+ * that logic once and this will override it.
+ */
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_gaming == true &&
+ state->fullscreen == true &&
+ state->fixed_refresh.fixed_active == false) {
+ /* Enable freesync */
+
+ v_total_min = state->freesync_range.vmin;
+ v_total_max = state->freesync_range.vmax;
+
+ /* Update the freesync context for the stream */
+ update_stream_freesync_context(core_freesync,
+ streams[stream_idx]);
+
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(core_freesync->dc, streams,
+ num_streams, v_total_min,
+ v_total_max);
+
+ return true;
+
+ } else if (core_freesync->map[map_index].user_enable.
+ enable_for_video && state->video == true) {
+ /* Enable 48Hz feature */
+
+ calc_v_total_from_duration(streams[stream_idx],
+ state->time.update_duration_in_ns,
+ &v_total_nominal);
+
+ /* Program only if v_total_nominal is in range*/
+ if (v_total_nominal >=
+ streams[stream_idx]->timing.v_total) {
+
+ /* Update the freesync context for
+ * the stream
+ */
+ update_stream_freesync_context(
+ core_freesync,
+ streams[stream_idx]);
+
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total_nominal,
+ v_total_nominal);
+ }
+ return true;
+
+ } else {
+ /* Disable freesync */
+ v_total_nominal = streams[stream_idx]->
+ timing.v_total;
+
+ /* Update the freesync context for
+ * the stream
+ */
+ update_stream_freesync_context(
+ core_freesync,
+ streams[stream_idx]);
+
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total_nominal,
+ v_total_nominal);
+
+ /* Reset the cached variables */
+ reset_freesync_state_variables(state);
+
+ return true;
+ }
+ } else {
+ /* Disable freesync */
+ v_total_nominal = streams[stream_idx]->
+ timing.v_total;
+ /*
+ * we always have to reset DRR, even if the sink does
+ * not support freesync, because a former stream may
+ * have been programmed
+ */
+ core_freesync->dc->stream_funcs.
+ adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total_nominal,
+ v_total_nominal);
+ /* Reset the cached variables */
+ reset_freesync_state_variables(state);
+ }
+
+ }
+
+ return false;
+}
+
+static void set_static_ramp_variables(struct core_freesync *core_freesync,
+ unsigned int index, bool enable_static_screen)
+{
+ unsigned int frame_duration = 0;
+ unsigned int nominal_refresh_rate = core_freesync->map[index].state.
+ nominal_refresh_rate_in_micro_hz;
+ unsigned int min_refresh_rate = core_freesync->map[index].caps->
+ min_refresh_in_micro_hz;
+ struct gradual_static_ramp *static_ramp_variables =
+ &core_freesync->map[index].state.static_ramp;
+
+ /* If we are ENABLING static screen, refresh rate should go DOWN.
+ * If we are DISABLING static screen, refresh rate should go UP.
+ */
+ if (enable_static_screen)
+ static_ramp_variables->ramp_direction_is_up = false;
+ else
+ static_ramp_variables->ramp_direction_is_up = true;
+
+ /* If ramp is not active, set initial frame duration depending on
+ * whether we are enabling/disabling static screen mode. If the ramp is
+ * already active, ramp should continue in the opposite direction
+ * starting with the current frame duration
+ */
+ if (!static_ramp_variables->ramp_is_active) {
+ if (enable_static_screen == true) {
+ /* Going to lower refresh rate, so start from max
+ * refresh rate (min frame duration)
+ */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ nominal_refresh_rate)));
+ } else {
+ /* Going to higher refresh rate, so start from min
+ * refresh rate (max frame duration)
+ */
+ frame_duration = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ min_refresh_rate)));
+ }
+ static_ramp_variables->
+ ramp_current_frame_duration_in_ns = frame_duration;
+
+ static_ramp_variables->ramp_is_active = true;
+ }
+}
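The micro-hertz to frame-duration conversion used above (and throughout this file) is frame_duration_ns = 10^9 * 10^6 / refresh_rate_in_micro_hz. A hedged sketch with an assumed 40-60 Hz panel gives the two ramp endpoints the code starts from; the values are illustrative only, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t nominal_uhz = 60000000;        /* assumed 60 Hz in micro-Hz */
        uint64_t min_uhz = 40000000;            /* assumed 40 Hz in micro-Hz */

        /* min frame duration: ramp target when leaving static screen */
        uint64_t min_dur_ns = 1000000000ULL * 1000000 / nominal_uhz;    /* 16666666 */
        /* max frame duration: ramp target when entering static screen */
        uint64_t max_dur_ns = 1000000000ULL * 1000000 / min_uhz;        /* 25000000 */

        printf("%llu..%llu ns\n", (unsigned long long)min_dur_ns,
               (unsigned long long)max_dur_ns);
        return 0;
}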
+
+void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams)
+{
+ unsigned int index, v_total, inserted_frame_v_total = 0;
+ unsigned int min_frame_duration_in_ns, vmax, vmin = 0;
+ struct freesync_state *state;
+ struct core_freesync *core_freesync = NULL;
+ struct dc_static_screen_events triggers = {0};
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ if (core_freesync->num_entities == 0)
+ return;
+
+ index = map_index_from_stream(core_freesync,
+ streams[0]);
+
+ if (core_freesync->map[index].caps->supported == false)
+ return;
+
+ state = &core_freesync->map[index].state;
+
+ /* Below the Range Logic */
+
+ /* Only execute if in fullscreen mode */
+ if (state->fullscreen == true &&
+ core_freesync->map[index].user_enable.enable_for_gaming &&
+ core_freesync->map[index].caps->btr_supported &&
+ state->btr.btr_active) {
+
+ /* TODO: pass in a flag for pre-DCE12 ASICs.
+ * In order for variable frame duration to take effect,
+ * it needs to be done one VSYNC early, which is at
+ * frameCounter == 1.
+ * For DCE12 and newer, updates to V_TOTAL_MIN/MAX
+ * take effect on the current frame.
+ */
+ if (state->btr.frames_to_insert == state->btr.frame_counter) {
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ state->nominal_refresh_rate_in_micro_hz)));
+
+ vmin = state->freesync_range.vmin;
+
+ inserted_frame_v_total = vmin;
+
+ if (min_frame_duration_in_ns / 1000)
+ inserted_frame_v_total =
+ state->btr.inserted_frame_duration_in_us *
+ vmin / (min_frame_duration_in_ns / 1000);
+
+ /* Set length of inserted frames as v_total_max*/
+ vmax = inserted_frame_v_total;
+ vmin = inserted_frame_v_total;
+
+ /* Program V_TOTAL */
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, vmin, vmax);
+ }
+
+ if (state->btr.frame_counter > 0)
+ state->btr.frame_counter--;
+
+ /* Restore FreeSync */
+ if (state->btr.frame_counter == 0)
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+ }
+
+ /* If in fullscreen freesync mode or in video, do not program
+ * static screen ramp values
+ */
+ if (state->fullscreen == true || state->video == true) {
+
+ state->static_ramp.ramp_is_active = false;
+
+ return;
+ }
+
+ /* Gradual Static Screen Ramping Logic */
+
+ /* Execute if ramp is active and user enabled freesync static screen*/
+ if (state->static_ramp.ramp_is_active &&
+ core_freesync->map[index].user_enable.enable_for_static) {
+
+ calc_v_total_for_static_ramp(core_freesync, streams[0],
+ index, &v_total);
+
+ /* Update the freesync context for the stream */
+ update_stream_freesync_context(core_freesync, streams[0]);
+
+ /* Program static screen ramp values */
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, streams,
+ num_streams, v_total,
+ v_total);
+
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ core_freesync->dc->stream_funcs.set_static_screen_events(
+ core_freesync->dc, streams, num_streams,
+ &triggers);
+ }
+}
+
+void mod_freesync_update_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_params *freesync_params)
+{
+ bool freesync_program_required = false;
+ unsigned int stream_index;
+ struct freesync_state *state;
+ struct core_freesync *core_freesync = NULL;
+ struct dc_static_screen_events triggers = {0};
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ if (core_freesync->num_entities == 0)
+ return;
+
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+
+ unsigned int map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ bool is_embedded = dc_is_embedded_signal(
+ streams[stream_index]->sink->sink_signal);
+
+ struct freesync_registry_options *opts = &core_freesync->opts;
+
+ state = &core_freesync->map[map_index].state;
+
+ switch (freesync_params->state) {
+ case FREESYNC_STATE_FULLSCREEN:
+ state->fullscreen = freesync_params->enable;
+ freesync_program_required = true;
+ state->windowed_fullscreen =
+ freesync_params->windowed_fullscreen;
+ break;
+ case FREESYNC_STATE_STATIC_SCREEN:
+ /* Static screen ramp is disabled by default, but can
+ * be enabled through regkey.
+ */
+ if ((is_embedded && opts->drr_internal_supported) ||
+ (!is_embedded && opts->drr_external_supported))
+
+ if (state->static_screen !=
+ freesync_params->enable) {
+
+ /* Change the state flag */
+ state->static_screen =
+ freesync_params->enable;
+
+ /* Update static screen ramp */
+ set_static_ramp_variables(core_freesync,
+ map_index,
+ freesync_params->enable);
+ }
+ /* We program the ramp starting next VUpdate */
+ break;
+ case FREESYNC_STATE_VIDEO:
+ /* Change core variables only if there is a change*/
+ if (freesync_params->update_duration_in_ns !=
+ state->time.update_duration_in_ns) {
+
+ state->video = freesync_params->enable;
+ state->time.update_duration_in_ns =
+ freesync_params->update_duration_in_ns;
+
+ freesync_program_required = true;
+ }
+ break;
+ case FREESYNC_STATE_NONE:
+ /* handle here to avoid warning */
+ break;
+ }
+ }
+
+ /* Update mask */
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ core_freesync->dc->stream_funcs.set_static_screen_events(
+ core_freesync->dc, streams, num_streams,
+ &triggers);
+
+ if (freesync_program_required)
+ /* Program freesync according to current state*/
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+}
+
+
+bool mod_freesync_get_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_params *freesync_params)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ if (core_freesync->map[index].state.fullscreen) {
+ freesync_params->state = FREESYNC_STATE_FULLSCREEN;
+ freesync_params->enable = true;
+ } else if (core_freesync->map[index].state.static_screen) {
+ freesync_params->state = FREESYNC_STATE_STATIC_SCREEN;
+ freesync_params->enable = true;
+ } else if (core_freesync->map[index].state.video) {
+ freesync_params->state = FREESYNC_STATE_VIDEO;
+ freesync_params->enable = true;
+ } else {
+ freesync_params->state = FREESYNC_STATE_NONE;
+ freesync_params->enable = false;
+ }
+
+ freesync_params->update_duration_in_ns =
+ core_freesync->map[index].state.time.update_duration_in_ns;
+
+ freesync_params->windowed_fullscreen =
+ core_freesync->map[index].state.windowed_fullscreen;
+
+ return true;
+}
+
+bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_user_enable *user_enable)
+{
+ unsigned int stream_index, map_index;
+ int persistent_data = 0;
+ struct persistent_data_flag flag;
+ struct dc *dc = NULL;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ dc = core_freesync->dc;
+
+ flag.save_per_edid = true;
+ flag.save_per_link = false;
+
+ for (stream_index = 0; stream_index < num_streams;
+ stream_index++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ core_freesync->map[map_index].user_enable = *user_enable;
+
+ /* Write persistent data in registry*/
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_gaming)
+ persistent_data = persistent_data | 1;
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_static)
+ persistent_data = persistent_data | 2;
+ if (core_freesync->map[map_index].user_enable.
+ enable_for_video)
+ persistent_data = persistent_data | 4;
+
+ dm_write_persistent_data(dc->ctx,
+ streams[stream_index]->sink,
+ FREESYNC_REGISTRY_NAME,
+ "userenable",
+ &persistent_data,
+ sizeof(int),
+ &flag);
+ }
+
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+
+ return true;
+}
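The persistent registry value written above packs the three user enables into one integer: bit 0 for gaming, bit 1 for static screen and bit 2 for video, in the order the ORs appear in the loop. A small, self-contained sketch of the reverse decode; the helper and struct are hypothetical and declared locally so the snippet compiles on its own:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical local mirror of the three enables, for illustration only */
struct user_enable_example {
        bool enable_for_gaming;
        bool enable_for_static;
        bool enable_for_video;
};

/* hypothetical decoder for the "userenable" value written above */
static void decode_userenable(int persistent_data, struct user_enable_example *ue)
{
        ue->enable_for_gaming = persistent_data & 1;    /* bit 0 */
        ue->enable_for_static = persistent_data & 2;    /* bit 1 */
        ue->enable_for_video  = persistent_data & 4;    /* bit 2 */
}

int main(void)
{
        struct user_enable_example ue;

        decode_userenable(5, &ue);      /* gaming + video */
        printf("gaming=%d static=%d video=%d\n",
               ue.enable_for_gaming, ue.enable_for_static, ue.enable_for_video);
        return 0;
}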
+
+bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_user_enable *user_enable)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *user_enable = core_freesync->map[index].user_enable;
+
+ return true;
+}
+
+bool mod_freesync_get_static_ramp_active(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ bool *is_ramp_active)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *is_ramp_active =
+ core_freesync->map[index].state.static_ramp.ramp_is_active;
+
+ return true;
+}
+
+bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *streams,
+ unsigned int min_refresh,
+ unsigned int max_refresh,
+ struct mod_freesync_caps *caps)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync;
+ struct freesync_state *state;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, streams);
+ state = &core_freesync->map[index].state;
+
+ if (max_refresh == 0)
+ max_refresh = state->nominal_refresh_rate_in_micro_hz;
+
+ if (min_refresh == 0) {
+ /* Restore defaults */
+ calc_freesync_range(core_freesync, streams, state,
+ core_freesync->map[index].caps->
+ min_refresh_in_micro_hz,
+ state->nominal_refresh_rate_in_micro_hz);
+ } else {
+ calc_freesync_range(core_freesync, streams,
+ state,
+ min_refresh,
+ max_refresh);
+
+ /* Program vtotal min/max */
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, &streams, 1,
+ state->freesync_range.vmin,
+ state->freesync_range.vmax);
+ }
+
+ if (min_refresh != 0 &&
+ dc_is_embedded_signal(streams->sink->sink_signal) &&
+ (max_refresh - min_refresh >= 10000000)) {
+ caps->supported = true;
+ caps->min_refresh_in_micro_hz = min_refresh;
+ caps->max_refresh_in_micro_hz = max_refresh;
+ }
+
+ /* Update the stream */
+ update_stream(core_freesync, streams);
+
+ return true;
+}
+
+bool mod_freesync_get_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *min_refresh,
+ unsigned int *max_refresh)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *min_refresh =
+ core_freesync->map[index].state.freesync_range.min_refresh;
+ *max_refresh =
+ core_freesync->map[index].state.freesync_range.max_refresh;
+
+ return true;
+}
+
+bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *vmin,
+ unsigned int *vmax)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ *vmin =
+ core_freesync->map[index].state.freesync_range.vmin;
+ *vmax =
+ core_freesync->map[index].state.freesync_range.vmax;
+
+ return true;
+}
+
+bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *nom_v_pos,
+ unsigned int *v_pos)
+{
+ unsigned int index = 0;
+ struct core_freesync *core_freesync = NULL;
+ struct crtc_position position;
+
+ if (mod_freesync == NULL)
+ return false;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+ index = map_index_from_stream(core_freesync, stream);
+
+ if (core_freesync->dc->stream_funcs.get_crtc_position(
+ core_freesync->dc, &stream, 1,
+ &position.vertical_count, &position.nominal_vcount)) {
+
+ *nom_v_pos = position.nominal_vcount;
+ *v_pos = position.vertical_count;
+
+ return true;
+ }
+
+ return false;
+}
+
+void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams)
+{
+ unsigned int stream_index, map_index;
+ struct freesync_state *state;
+ struct core_freesync *core_freesync = NULL;
+ struct dc_static_screen_events triggers = {0};
+ unsigned long long temp = 0;
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ state = &core_freesync->map[map_index].state;
+
+ /* Update the field rate for new timing */
+ temp = streams[stream_index]->timing.pix_clk_khz;
+ temp *= 1000ULL * 1000ULL * 1000ULL;
+ temp = div_u64(temp,
+ streams[stream_index]->timing.h_total);
+ temp = div_u64(temp,
+ streams[stream_index]->timing.v_total);
+ state->nominal_refresh_rate_in_micro_hz =
+ (unsigned int) temp;
+
+ if (core_freesync->map[map_index].caps->supported) {
+
+ /* Update the stream */
+ update_stream(core_freesync, streams[stream_index]);
+
+ /* Calculate vmin/vmax and refresh rate for
+ * current mode
+ */
+ calc_freesync_range(core_freesync, *streams, state,
+ core_freesync->map[map_index].caps->
+ min_refresh_in_micro_hz,
+ state->nominal_refresh_rate_in_micro_hz);
+
+ /* Update mask */
+ triggers.overlay_update = true;
+ triggers.surface_update = true;
+
+ core_freesync->dc->stream_funcs.set_static_screen_events(
+ core_freesync->dc, streams, num_streams,
+ &triggers);
+ }
+ }
+
+ /* Program freesync according to current state*/
+ set_freesync_on_streams(core_freesync, streams, num_streams);
+}
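The field-rate update above computes nominal_refresh_rate_in_micro_hz = pix_clk_khz * 10^9 / (h_total * v_total). With the same assumed 1080p timing as earlier (148500 kHz pixel clock, 2200 x 1125 totals), the division chain lands exactly on 60,000,000 micro-Hz; a quick sketch with illustrative numbers only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pix_clk_khz = 148500;  /* assumed */
        uint64_t h_total = 2200;        /* assumed */
        uint64_t v_total = 1125;        /* assumed */

        /* same division order as mod_freesync_notify_mode_change() */
        uint64_t temp = pix_clk_khz * 1000ULL * 1000ULL * 1000ULL;
        temp /= h_total;
        temp /= v_total;

        printf("%llu uHz\n", (unsigned long long)temp); /* prints 60000000 */
        return 0;
}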
+
+/* Add the timestamps to the cache and determine whether BTR programming
+ * is required, depending on the times calculated
+ */
+static void update_timestamps(struct core_freesync *core_freesync,
+ const struct dc_stream_state *stream, unsigned int map_index,
+ unsigned int last_render_time_in_us)
+{
+ struct freesync_state *state = &core_freesync->map[map_index].state;
+
+ state->time.render_times[state->time.render_times_index] =
+ last_render_time_in_us;
+ state->time.render_times_index++;
+
+ if (state->time.render_times_index >= RENDER_TIMES_MAX_COUNT)
+ state->time.render_times_index = 0;
+
+ if (last_render_time_in_us + BTR_EXIT_MARGIN <
+ state->time.max_render_time_in_us) {
+
+ /* Exit Below the Range */
+ if (state->btr.btr_active) {
+
+ state->btr.program_btr = true;
+ state->btr.btr_active = false;
+ state->btr.frame_counter = 0;
+
+ /* Exit Fixed Refresh mode */
+ } else if (state->fixed_refresh.fixed_active) {
+
+ state->fixed_refresh.frame_counter++;
+
+ if (state->fixed_refresh.frame_counter >
+ FIXED_REFRESH_EXIT_FRAME_COUNT) {
+ state->fixed_refresh.frame_counter = 0;
+ state->fixed_refresh.program_fixed = true;
+ state->fixed_refresh.fixed_active = false;
+ }
+ }
+
+ } else if (last_render_time_in_us > state->time.max_render_time_in_us) {
+
+ /* Enter Below the Range */
+ if (!state->btr.btr_active &&
+ core_freesync->map[map_index].caps->btr_supported) {
+
+ state->btr.program_btr = true;
+ state->btr.btr_active = true;
+
+ /* Enter Fixed Refresh mode */
+ } else if (!state->fixed_refresh.fixed_active &&
+ !core_freesync->map[map_index].caps->btr_supported) {
+
+ state->fixed_refresh.frame_counter++;
+
+ if (state->fixed_refresh.frame_counter >
+ FIXED_REFRESH_ENTER_FRAME_COUNT) {
+ state->fixed_refresh.frame_counter = 0;
+ state->fixed_refresh.program_fixed = true;
+ state->fixed_refresh.fixed_active = true;
+ }
+ }
+ }
+
+ /* When Below the Range is active, we must react on every frame */
+ if (state->btr.btr_active)
+ state->btr.program_btr = true;
+}
+
+static void apply_below_the_range(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream, unsigned int map_index,
+ unsigned int last_render_time_in_us)
+{
+ unsigned int inserted_frame_duration_in_us = 0;
+ unsigned int mid_point_frames_ceil = 0;
+ unsigned int mid_point_frames_floor = 0;
+ unsigned int frame_time_in_us = 0;
+ unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF;
+ unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF;
+ unsigned int frames_to_insert = 0;
+ unsigned int min_frame_duration_in_ns = 0;
+ struct freesync_state *state = &core_freesync->map[map_index].state;
+
+ if (!state->btr.program_btr)
+ return;
+
+ state->btr.program_btr = false;
+
+ min_frame_duration_in_ns = ((unsigned int) (div64_u64(
+ (1000000000ULL * 1000000),
+ state->nominal_refresh_rate_in_micro_hz)));
+
+ /* Program BTR */
+
+ /* BTR set to "not active" so disengage */
+ if (!state->btr.btr_active)
+
+ /* Restore FreeSync */
+ set_freesync_on_streams(core_freesync, &stream, 1);
+
+ /* BTR set to "active" so engage */
+ else {
+
+ /* Calculate number of midPoint frames that could fit within
+ * the render time interval - take the ceiling of this value
+ */
+ mid_point_frames_ceil = (last_render_time_in_us +
+ state->btr.mid_point_in_us - 1) /
+ state->btr.mid_point_in_us;
+
+ if (mid_point_frames_ceil > 0) {
+
+ frame_time_in_us = last_render_time_in_us /
+ mid_point_frames_ceil;
+ delta_from_mid_point_in_us_1 =
+ (state->btr.mid_point_in_us >
+ frame_time_in_us) ?
+ (state->btr.mid_point_in_us - frame_time_in_us):
+ (frame_time_in_us - state->btr.mid_point_in_us);
+ }
+
+ /* Calculate number of midPoint frames that could fit within
+ * the render time interval - take the floor of this value
+ */
+ mid_point_frames_floor = last_render_time_in_us /
+ state->btr.mid_point_in_us;
+
+ if (mid_point_frames_floor > 0) {
+
+ frame_time_in_us = last_render_time_in_us /
+ mid_point_frames_floor;
+ delta_from_mid_point_in_us_2 =
+ (state->btr.mid_point_in_us >
+ frame_time_in_us) ?
+ (state->btr.mid_point_in_us - frame_time_in_us):
+ (frame_time_in_us - state->btr.mid_point_in_us);
+ }
+
+ /* Choose number of frames to insert based on how close it
+ * can get to the mid point of the variable range.
+ */
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2)
+ frames_to_insert = mid_point_frames_ceil;
+ else
+ frames_to_insert = mid_point_frames_floor;
+
+ /* Either we've calculated the number of frames to insert,
+ * or we need to insert min duration frames
+ */
+ if (frames_to_insert > 0)
+ inserted_frame_duration_in_us = last_render_time_in_us /
+ frames_to_insert;
+
+ if (inserted_frame_duration_in_us <
+ state->time.min_render_time_in_us)
+
+ inserted_frame_duration_in_us =
+ state->time.min_render_time_in_us;
+
+ /* Cache the calculated variables */
+ state->btr.inserted_frame_duration_in_us =
+ inserted_frame_duration_in_us;
+ state->btr.frames_to_insert = frames_to_insert;
+ state->btr.frame_counter = frames_to_insert;
+
+ }
+}
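As a worked example of the midpoint selection above, assume a 48-144 Hz capable panel (so min_render_time is roughly 6944 us, max_render_time roughly 20833 us and the cached mid point about 13888 us) and a flip-to-flip render time of 35000 us as measured in mod_freesync_pre_update_plane_addresses(). The sketch below repeats the driver's arithmetic with those assumed numbers; it picks three inserted frames of about 11666 us each, so the panel runs near 85.7 Hz while the application renders at roughly 28.6 fps:

#include <stdio.h>

int main(void)
{
        /* assumed 48-144 Hz panel, times in microseconds */
        unsigned int mid_point = 13888;
        unsigned int min_render_time = 6944;
        unsigned int last_render_time = 35000;  /* ~28.6 fps render pace */

        unsigned int frames_ceil = (last_render_time + mid_point - 1) / mid_point;      /* 3 */
        unsigned int frames_floor = last_render_time / mid_point;                       /* 2 */

        unsigned int ft_ceil = last_render_time / frames_ceil;         /* 11666 */
        unsigned int ft_floor = last_render_time / frames_floor;       /* 17500 */
        unsigned int d1 = mid_point > ft_ceil ? mid_point - ft_ceil : ft_ceil - mid_point;
        unsigned int d2 = mid_point > ft_floor ? mid_point - ft_floor : ft_floor - mid_point;

        unsigned int frames_to_insert = d1 < d2 ? frames_ceil : frames_floor;   /* 3 */
        unsigned int inserted_duration = last_render_time / frames_to_insert;   /* 11666 */

        if (inserted_duration < min_render_time)
                inserted_duration = min_render_time;

        printf("insert %u frames of %u us\n", frames_to_insert, inserted_duration);
        return 0;
}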
+
+static void apply_fixed_refresh(struct core_freesync *core_freesync,
+ struct dc_stream_state *stream, unsigned int map_index)
+{
+ unsigned int vmin = 0, vmax = 0;
+ struct freesync_state *state = &core_freesync->map[map_index].state;
+
+ if (!state->fixed_refresh.program_fixed)
+ return;
+
+ state->fixed_refresh.program_fixed = false;
+
+ /* Program Fixed Refresh */
+
+ /* Fixed Refresh set to "not active" so disengage */
+ if (!state->fixed_refresh.fixed_active) {
+ set_freesync_on_streams(core_freesync, &stream, 1);
+
+ /* Fixed Refresh set to "active" so engage (fix to max) */
+ } else {
+
+ vmin = state->freesync_range.vmin;
+
+ vmax = vmin;
+
+ core_freesync->dc->stream_funcs.adjust_vmin_vmax(
+ core_freesync->dc, &stream,
+ 1, vmin,
+ vmax);
+ }
+}
+
+void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int curr_time_stamp_in_us)
+{
+ unsigned int stream_index, map_index, last_render_time_in_us = 0;
+ struct core_freesync *core_freesync = NULL;
+
+ if (mod_freesync == NULL)
+ return;
+
+ core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync);
+
+ for (stream_index = 0; stream_index < num_streams; stream_index++) {
+
+ map_index = map_index_from_stream(core_freesync,
+ streams[stream_index]);
+
+ if (core_freesync->map[map_index].caps->supported) {
+
+ last_render_time_in_us = curr_time_stamp_in_us -
+ core_freesync->map[map_index].state.time.
+ prev_time_stamp_in_us;
+
+ /* Add the timestamps to the cache and determine
+ * whether BTR programming is required
+ */
+ update_timestamps(core_freesync, streams[stream_index],
+ map_index, last_render_time_in_us);
+
+ if (core_freesync->map[map_index].state.fullscreen &&
+ core_freesync->map[map_index].user_enable.
+ enable_for_gaming) {
+
+ if (core_freesync->map[map_index].caps->btr_supported) {
+
+ apply_below_the_range(core_freesync,
+ streams[stream_index], map_index,
+ last_render_time_in_us);
+ } else {
+ apply_fixed_refresh(core_freesync,
+ streams[stream_index], map_index);
+ }
+ }
+
+ core_freesync->map[map_index].state.time.
+ prev_time_stamp_in_us = curr_time_stamp_in_us;
+ }
+
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
new file mode 100644
index 000000000000..84b53425f2c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MOD_FREESYNC_H_
+#define MOD_FREESYNC_H_
+
+#include "dm_services.h"
+
+struct mod_freesync *mod_freesync_create(struct dc *dc);
+void mod_freesync_destroy(struct mod_freesync *mod_freesync);
+
+struct mod_freesync {
+ int dummy;
+};
+
+enum mod_freesync_state {
+ FREESYNC_STATE_NONE,
+ FREESYNC_STATE_FULLSCREEN,
+ FREESYNC_STATE_STATIC_SCREEN,
+ FREESYNC_STATE_VIDEO
+};
+
+enum mod_freesync_user_enable_mask {
+ FREESYNC_USER_ENABLE_STATIC = 0x1,
+ FREESYNC_USER_ENABLE_VIDEO = 0x2,
+ FREESYNC_USER_ENABLE_GAMING = 0x4
+};
+
+struct mod_freesync_user_enable {
+ bool enable_for_static;
+ bool enable_for_video;
+ bool enable_for_gaming;
+};
+
+struct mod_freesync_caps {
+ bool supported;
+ unsigned int min_refresh_in_micro_hz;
+ unsigned int max_refresh_in_micro_hz;
+
+ bool btr_supported;
+};
+
+struct mod_freesync_params {
+ enum mod_freesync_state state;
+ bool enable;
+ unsigned int update_duration_in_ns;
+ bool windowed_fullscreen;
+};
+
+/*
+ * Add stream to be tracked by module
+ */
+bool mod_freesync_add_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream, struct mod_freesync_caps *caps);
+
+/*
+ * Remove a stream from being tracked by the module
+ */
+bool mod_freesync_remove_stream(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream);
+
+/*
+ * Update the freesync state flags for each display and program
+ * freesync accordingly
+ */
+void mod_freesync_update_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_params *freesync_params);
+
+bool mod_freesync_get_state(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_params *freesync_params);
+
+bool mod_freesync_set_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ struct mod_freesync_user_enable *user_enable);
+
+bool mod_freesync_get_user_enable(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ struct mod_freesync_user_enable *user_enable);
+
+bool mod_freesync_get_static_ramp_active(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ bool *is_ramp_active);
+
+bool mod_freesync_override_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *streams,
+ unsigned int min_refresh,
+ unsigned int max_refresh,
+ struct mod_freesync_caps *caps);
+
+bool mod_freesync_get_min_max(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *min_refresh,
+ unsigned int *max_refresh);
+
+bool mod_freesync_get_vmin_vmax(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *vmin,
+ unsigned int *vmax);
+
+bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
+ struct dc_stream_state *stream,
+ unsigned int *nom_v_pos,
+ unsigned int *v_pos);
+
+void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams);
+
+void mod_freesync_notify_mode_change(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams);
+
+void mod_freesync_pre_update_plane_addresses(struct mod_freesync *mod_freesync,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int curr_time_stamp);
+
+#endif
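The caps structure above expresses its limits in micro-hertz, and freesync.c enables BTR only when the maximum frame duration is at least twice the minimum (equivalently, max refresh >= 2 * min refresh). A self-contained sketch with an assumed 40-60 Hz panel; the struct is re-declared locally so the snippet compiles on its own and is not the kernel type:

#include <stdbool.h>
#include <stdio.h>

/* local copy of the relevant fields from struct mod_freesync_caps, for illustration */
struct caps_example {
        bool supported;
        unsigned int min_refresh_in_micro_hz;
        unsigned int max_refresh_in_micro_hz;
        bool btr_supported;
};

int main(void)
{
        struct caps_example caps = {
                .supported = true,
                .min_refresh_in_micro_hz = 40 * 1000000,        /* assumed 40 Hz */
                .max_refresh_in_micro_hz = 60 * 1000000,        /* assumed 60 Hz */
        };

        /* BTR needs max frame duration >= 2 * min frame duration,
         * which is the same as max refresh >= 2 * min refresh */
        caps.btr_supported = caps.max_refresh_in_micro_hz >=
                             2 * caps.min_refresh_in_micro_hz;

        printf("btr_supported = %d\n", caps.btr_supported);     /* 0 for a 40-60 Hz range */
        return 0;
}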
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
index b39fb6821faa..4ccf9681c45d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_offset.h
@@ -2283,6 +2283,10 @@
#define mmDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2
#define mmDCHUBBUB_SPARE 0x0534
#define mmDCHUBBUB_SPARE_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_INDEX 0x053a
+#define mmDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_DATA 0x053b
+#define mmDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
// addressBlock: dce_dc_dchubbub_dchubbub_dcperfmon_dc_perfmon_dispdec
@@ -10361,6 +10365,8 @@
#define mmUNIPHYG_CHANNEL_XBAR_CNTL_BASE_IDX 2
#define mmDCIO_WRCMD_DELAY 0x287e
#define mmDCIO_WRCMD_DELAY_BASE_IDX 2
+#define mmDC_PINSTRAPS 0x2880
+#define mmDC_PINSTRAPS_BASE_IDX 2
#define mmDC_DVODATA_CONFIG 0x2882
#define mmDC_DVODATA_CONFIG_BASE_IDX 2
#define mmLVTMA_PWRSEQ_CNTL 0x2883
diff --git a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
index 1e98ce86ed19..b28d4b64c05d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/raven1/DCN/dcn_1_0_sh_mask.h
@@ -9361,12 +9361,14 @@
#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
//HUBPREQ0_DCSURF_SURFACE_CONTROL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
@@ -39956,6 +39958,9 @@
#define DCIO_WRCMD_DELAY__DPHY_DELAY_MASK 0x00000F00L
#define DCIO_WRCMD_DELAY__DCRXPHY_DELAY_MASK 0x0000F000L
#define DCIO_WRCMD_DELAY__ZCAL_DELAY_MASK 0x000F0000L
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
//DC_DVODATA_CONFIG
#define DC_DVODATA_CONFIG__VIP_MUX_EN__SHIFT 0x13
#define DC_DVODATA_CONFIG__VIP_ALTER_MAPPING_EN__SHIFT 0x14
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
index 75b660d57bdf..f730d0629020 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h
@@ -1841,6 +1841,10 @@
#define mmUNIPHYG_CHANNEL_XBAR_CNTL_BASE_IDX 2
#define mmDCIO_WRCMD_DELAY 0x2094
#define mmDCIO_WRCMD_DELAY_BASE_IDX 2
+#define mmDC_PINSTRAPS 0x2096
+#define mmDC_PINSTRAPS_BASE_IDX 2
+#define mmCC_DC_MISC_STRAPS 0x2097
+#define mmCC_DC_MISC_STRAPS_BASE_IDX 2
#define mmDC_DVODATA_CONFIG 0x2098
#define mmDC_DVODATA_CONFIG_BASE_IDX 2
#define mmLVTMA_PWRSEQ_CNTL 0x2099
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
index d8ad862b3a74..6d3162c42957 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h
@@ -2447,6 +2447,14 @@
//DCCG_CBUS_WRCMD_DELAY
#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY__SHIFT 0x0
#define DCCG_CBUS_WRCMD_DELAY__CBUS_PLL_WRCMD_DELAY_MASK 0x0000000FL
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
+//CC_DC_MISC_STRAPS
+#define CC_DC_MISC_STRAPS__HDMI_DISABLE__SHIFT 0x6
+#define CC_DC_MISC_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8
+#define CC_DC_MISC_STRAPS__HDMI_DISABLE_MASK 0x00000040L
+#define CC_DC_MISC_STRAPS__AUDIO_STREAM_NUMBER_MASK 0x00000700L
//DCCG_DS_DTO_INCR
#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 68b417ac94dd..8c55c6e254d9 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += \
-I$(FULL_AMD_PATH)/powerplay/inc/ \
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index dc4bbcfe1243..824fb6fe54ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the 'hw manager' sub-component of powerplay.
# It provides the hardware management services for the driver.
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
index 8ba75d43fba6..67fae834bc67 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_overdriver.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "pp_overdriver.h"
#include <linux/errno.h>
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index a129bc5b1844..c6febbf0bf69 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1486,7 +1486,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
if (vddci_id_buf[i] == virtual_voltage_id) {
for (j = 0; j < profile->ucLeakageBinNum; j++) {
if (efuse_voltage_id <= leakage_bin[j]) {
- *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i];
+ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
break;
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index d1af1483c69b..a651ebcf44fd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -830,9 +830,9 @@ static int init_over_drive_limits(
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
{
hwmgr->platform_descriptor.overdriveLimit.engineClock =
- le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+ le32_to_cpu(powerplay_table->ulMaxODEngineClock);
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
- le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+ le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
hwmgr->platform_descriptor.minOverdriveVDDC = 0;
hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 4466469cf8ab..e33ec7fc5d09 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3778,7 +3778,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 4f79c21f27ed..f8d838c2c8ee 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -753,6 +753,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
uint32_t config_telemetry = 0;
struct pp_atomfwctrl_voltage_table vol_table;
struct cgs_system_info sys_info = {0};
+ uint32_t reg;
data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
if (data == NULL)
@@ -859,6 +860,16 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+ reg = soc15_get_register_offset(DF_HWID, 0,
+ mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
+ mmDF_CS_AON0_DramBaseAddress0);
+ data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+ PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
+ "Mem Channel Index Exceeded maximum!",
+ return -EINVAL);
+
return result;
}
@@ -1777,7 +1788,7 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
struct vega10_single_dpm_table *dpm_table =
&(data->dpm_table.mem_table);
int result = 0;
- uint32_t i, j, reg, mem_channels;
+ uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_memory_level(hwmgr,
@@ -1801,20 +1812,10 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
i++;
}
- reg = soc15_get_register_offset(DF_HWID, 0,
- mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
- mmDF_CS_AON0_DramBaseAddress0);
- mem_channels = (cgs_read_register(hwmgr->device, reg) &
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
- PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number),
- "Mem Channel Index Exceeded maximum!",
- return -1);
-
- pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
+ pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
pp_table->MemoryChannelWidth =
- cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
- channel_number[mem_channels]);
+ (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
+ channel_number[data->mem_channels]);
pp_table->LowestUclkReservedForUlv =
(uint8_t)(data->lowest_uclk_reserved_for_ulv);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
index b4b461c3b8ee..8f7358cc3327 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h
@@ -389,6 +389,7 @@ struct vega10_hwmgr {
uint32_t config_telemetry;
uint32_t smu_version;
uint32_t acg_loop_state;
+ uint32_t mem_channels;
};
#define VEGA10_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72.h b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
index b73d6b59ac32..08cd70c75d8b 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SMU72_H
#define SMU72_H
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
index 98f76e925e65..b2edbc0c3c4d 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef SMU72_DISCRETE_H
#define SMU72_DISCRETE_H
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index b24b0f203a51..30d3089d7dba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 8bd38102b58e..283a0dc25e84 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _GPU_SCHED_TRACE_H_
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index e4d3b4ec4e92..92ec663fdada 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -188,7 +188,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
if (kfifo_is_empty(&entity->job_queue))
return false;
- if (ACCESS_ONCE(entity->dependency))
+ if (READ_ONCE(entity->dependency))
return false;
return true;
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 63511a3bbf6c..630721f429f7 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -317,9 +317,8 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
formats, ARRAY_SIZE(formats),
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
+ if (ret)
return ERR_PTR(ret);
- }
drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
hdlcd->plane = plane;
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 59b21bdc0c30..feaa8bc3d7b7 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/console.h>
#include <linux/list.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
@@ -354,7 +355,7 @@ err_unload:
err_free:
drm_mode_config_cleanup(drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -379,7 +380,7 @@ static void hdlcd_drm_unbind(struct device *dev)
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
drm_mode_config_cleanup(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
}
@@ -432,9 +433,11 @@ static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
return 0;
drm_kms_helper_poll_disable(drm);
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
hdlcd->state = drm_atomic_helper_suspend(drm);
if (IS_ERR(hdlcd->state)) {
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
drm_kms_helper_poll_enable(drm);
return PTR_ERR(hdlcd->state);
}
@@ -451,8 +454,8 @@ static int __maybe_unused hdlcd_pm_resume(struct device *dev)
return 0;
drm_atomic_helper_resume(drm, hdlcd->state);
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
drm_kms_helper_poll_enable(drm);
- pm_runtime_set_active(dev);
return 0;
}
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index e3950a071152..56f34dfff640 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* ARM HDLCD Controller register definition
*/
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 3615d18a7ddf..904fff80917b 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -65,8 +65,8 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
/* We rely on firmware to set mclk to a sensible level. */
clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
- hwdev->modeset(hwdev, &vm);
- hwdev->leave_config_mode(hwdev);
+ hwdev->hw->modeset(hwdev, &vm);
+ hwdev->hw->leave_config_mode(hwdev);
drm_crtc_vblank_on(crtc);
}
@@ -77,8 +77,12 @@ static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
struct malidp_hw_device *hwdev = malidp->dev;
int err;
+ /* always disable planes on the CRTC that is being turned off */
+ drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
drm_crtc_vblank_off(crtc);
- hwdev->enter_config_mode(hwdev);
+ hwdev->hw->enter_config_mode(hwdev);
+
clk_disable_unprepare(hwdev->pxlclk);
err = pm_runtime_put(crtc->dev->dev);
@@ -319,7 +323,7 @@ static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
mclk_calc:
drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
- ret = hwdev->se_calc_mclk(hwdev, s, &vm);
+ ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
if (ret < 0)
return -EINVAL;
return 0;
@@ -475,7 +479,7 @@ static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.vsync_irq);
+ hwdev->hw->map.de_irq_map.vsync_irq);
return 0;
}
@@ -485,7 +489,7 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.vsync_irq);
+ hwdev->hw->map.de_irq_map.vsync_irq);
}
static const struct drm_crtc_funcs malidp_crtc_funcs = {
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index b8944666a18f..91f2b0191368 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -47,10 +47,10 @@ static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
* directly.
*/
malidp_hw_write(hwdev, gamma_write_mask,
- hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
+ hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
malidp_hw_write(hwdev, data[i],
- hwdev->map.coeffs_base +
+ hwdev->hw->map.coeffs_base +
MALIDP_COEF_TABLE_DATA);
}
@@ -103,7 +103,7 @@ void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
malidp_hw_write(hwdev,
mc->coloradj_coeffs[i],
- hwdev->map.coeffs_base +
+ hwdev->hw->map.coeffs_base +
MALIDP_COLOR_ADJ_COEF + 4 * i);
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
@@ -120,8 +120,8 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
struct malidp_hw_device *hwdev = malidp->dev;
struct malidp_se_config *s = &cs->scaler_config;
struct malidp_se_config *old_s = &old_cs->scaler_config;
- u32 se_control = hwdev->map.se_base +
- ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ u32 se_control = hwdev->hw->map.se_base +
+ ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
0x10 : 0xC);
u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
@@ -135,7 +135,7 @@ static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
return;
}
- hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
+ hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
val = malidp_hw_read(hwdev, se_control);
val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
@@ -170,9 +170,9 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
int ret;
atomic_set(&malidp->config_valid, 0);
- hwdev->set_config_valid(hwdev);
+ hwdev->hw->set_config_valid(hwdev);
/* don't wait for config_valid flag if we are in config mode */
- if (hwdev->in_config_mode(hwdev))
+ if (hwdev->hw->in_config_mode(hwdev))
return 0;
ret = wait_event_interruptible_timeout(malidp->wq,
@@ -455,7 +455,7 @@ static int malidp_runtime_pm_suspend(struct device *dev)
struct malidp_hw_device *hwdev = malidp->dev;
/* we can only suspend if the hardware is in config mode */
- WARN_ON(!hwdev->in_config_mode(hwdev));
+ WARN_ON(!hwdev->hw->in_config_mode(hwdev));
hwdev->pm_suspended = true;
clk_disable_unprepare(hwdev->mclk);
@@ -500,11 +500,7 @@ static int malidp_bind(struct device *dev)
if (!hwdev)
return -ENOMEM;
- /*
- * copy the associated data from malidp_drm_of_match to avoid
- * having to keep a reference to the OF node after binding
- */
- memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+ hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
malidp->dev = hwdev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -568,13 +564,13 @@ static int malidp_bind(struct device *dev)
goto query_hw_fail;
}
- ret = hwdev->query_hw(hwdev);
+ ret = hwdev->hw->query_hw(hwdev);
if (ret) {
DRM_ERROR("Invalid HW configuration\n");
goto query_hw_fail;
}
- version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+ version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
(version >> 12) & 0xf, (version >> 8) & 0xf);
@@ -589,7 +585,7 @@ static int malidp_bind(struct device *dev)
for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
out_depth = (out_depth << 8) | (output_width[i] & 0xf);
- malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+ malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
atomic_set(&malidp->config_valid, 0);
init_waitqueue_head(&malidp->wq);
@@ -671,7 +667,7 @@ query_hw_fail:
malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
alloc_fail:
of_reserved_mem_device_release(dev);
@@ -704,7 +700,7 @@ static void malidp_unbind(struct device *dev)
malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
of_reserved_mem_device_release(dev);
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 17bca99e8ac8..2bfb542135ac 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -183,7 +183,7 @@ static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
break;
/*
@@ -203,7 +203,7 @@ static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
break;
usleep_range(100, 1000);
@@ -216,7 +216,7 @@ static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
{
u32 status;
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
return true;
@@ -407,7 +407,7 @@ static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
break;
/*
@@ -427,7 +427,7 @@ static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
break;
usleep_range(100, 1000);
@@ -440,7 +440,7 @@ static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
{
u32 status;
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
return true;
@@ -616,7 +616,7 @@ static int malidp650_query_hw(struct malidp_hw_device *hwdev)
return 0;
}
-const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
[MALIDP_500] = {
.map = {
.coeffs_base = MALIDP500_COEFFS_BASE,
@@ -751,7 +751,7 @@ static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 ir
{
u32 base = malidp_get_block_base(hwdev, block);
- if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+ if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
else
malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
@@ -762,12 +762,14 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
struct drm_device *drm = arg;
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev;
+ struct malidp_hw *hw;
const struct malidp_irq_map *de;
u32 status, mask, dc_status;
irqreturn_t ret = IRQ_NONE;
hwdev = malidp->dev;
- de = &hwdev->map.de_irq_map;
+ hw = hwdev->hw;
+ de = &hw->map.de_irq_map;
/*
* if we are suspended it is likely that we were invoked because
@@ -778,8 +780,8 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
return IRQ_NONE;
/* first handle the config valid IRQ */
- dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
- if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+ dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+ if (dc_status & hw->map.dc_irq_map.vsync_irq) {
/* we have a page flip event */
atomic_set(&malidp->config_valid, 1);
malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
@@ -832,11 +834,11 @@ int malidp_de_irq_init(struct drm_device *drm, int irq)
/* first enable the DC block IRQs */
malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
- hwdev->map.dc_irq_map.irq_mask);
+ hwdev->hw->map.dc_irq_map.irq_mask);
/* now enable the DE block IRQs */
malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.irq_mask);
+ hwdev->hw->map.de_irq_map.irq_mask);
return 0;
}
@@ -847,9 +849,9 @@ void malidp_de_irq_fini(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.irq_mask);
+ hwdev->hw->map.de_irq_map.irq_mask);
malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
- hwdev->map.dc_irq_map.irq_mask);
+ hwdev->hw->map.dc_irq_map.irq_mask);
}
static irqreturn_t malidp_se_irq(int irq, void *arg)
@@ -857,6 +859,8 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
struct drm_device *drm = arg;
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_hw *hw = hwdev->hw;
+ const struct malidp_irq_map *se = &hw->map.se_irq_map;
u32 status, mask;
/*
@@ -867,12 +871,12 @@ static irqreturn_t malidp_se_irq(int irq, void *arg)
if (hwdev->pm_suspended)
return IRQ_NONE;
- status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
- if (!(status & hwdev->map.se_irq_map.irq_mask))
+ status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
+ if (!(status & se->irq_mask))
return IRQ_NONE;
- mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
- status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+ mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
+ status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
status &= mask;
/* ToDo: status decoding and firing up of VSYNC and page flip events */
@@ -905,7 +909,7 @@ int malidp_se_irq_init(struct drm_device *drm, int irq)
}
malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
- hwdev->map.se_irq_map.irq_mask);
+ hwdev->hw->map.se_irq_map.irq_mask);
return 0;
}
@@ -916,5 +920,5 @@ void malidp_se_irq_fini(struct drm_device *drm)
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
- hwdev->map.se_irq_map.irq_mask);
+ hwdev->hw->map.se_irq_map.irq_mask);
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 849ad9a30c3a..b0690ebb3565 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -120,18 +120,14 @@ struct malidp_hw_regmap {
/* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */
#define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0)
-struct malidp_hw_device {
- const struct malidp_hw_regmap map;
- void __iomem *regs;
+struct malidp_hw_device;
- /* APB clock */
- struct clk *pclk;
- /* AXI clock */
- struct clk *aclk;
- /* main clock for display core */
- struct clk *mclk;
- /* pixel clock for display core */
- struct clk *pxlclk;
+/*
+ * Static structure containing hardware specific data and pointers to
+ * functions that behave differently between various versions of the IP.
+ */
+struct malidp_hw {
+ const struct malidp_hw_regmap map;
/*
* Validate the driver instance against the hardware bits
@@ -182,15 +178,6 @@ struct malidp_hw_device {
struct videomode *vm);
u8 features;
-
- u8 min_line_size;
- u16 max_line_size;
-
- /* track the device PM state */
- bool pm_suspended;
-
- /* size of memory used for rotating layers, up to two banks available */
- u32 rotation_memory[2];
};
/* Supported variants of the hardware */
@@ -202,7 +189,33 @@ enum {
MALIDP_MAX_DEVICES
};
-extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES];
+
+/*
+ * Structure used by the driver during runtime operation.
+ */
+struct malidp_hw_device {
+ struct malidp_hw *hw;
+ void __iomem *regs;
+
+ /* APB clock */
+ struct clk *pclk;
+ /* AXI clock */
+ struct clk *aclk;
+ /* main clock for display core */
+ struct clk *mclk;
+ /* pixel clock for display core */
+ struct clk *pxlclk;
+
+ u8 min_line_size;
+ u16 max_line_size;
+
+ /* track the device PM state */
+ bool pm_suspended;
+
+ /* size of memory used for rotating layers, up to two banks available */
+ u32 rotation_memory[2];
+};
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
{
@@ -240,9 +253,9 @@ static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
{
switch (block) {
case MALIDP_SE_BLOCK:
- return hwdev->map.se_base;
+ return hwdev->hw->map.se_base;
case MALIDP_DC_BLOCK:
- return hwdev->map.dc_base;
+ return hwdev->hw->map.dc_base;
}
return 0;
@@ -275,7 +288,7 @@ u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
unsigned int pitch)
{
- return !(pitch & (hwdev->map.bus_align_bytes - 1));
+ return !(pitch & (hwdev->hw->map.bus_align_bytes - 1));
}
/* U16.16 */
@@ -308,8 +321,8 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
};
u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
- u32 image_enh = hwdev->map.se_base +
- ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ u32 image_enh = hwdev->hw->map.se_base +
+ ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
int i;
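The malidp_hw.h hunk above splits the old all-in-one device structure into a constant per-IP descriptor (struct malidp_hw) and a runtime instance that merely points at it, which is why every register-map access now goes through hwdev->hw->map. A minimal sketch of that split, using hypothetical my_hw/my_device names rather than the driver's real types:

#include <stdio.h>

/* Constant, per-IP-version data: register offsets and function pointers. */
struct my_hw {
	unsigned int dc_base;
	int (*query)(const struct my_hw *hw);
};

/* Per-instance runtime state; points at the shared static descriptor. */
struct my_device {
	const struct my_hw *hw;	/* which IP variant this instance is */
	int pm_suspended;	/* mutable runtime state stays here */
};

static int my_query_500(const struct my_hw *hw)
{
	(void)hw;
	return 500;
}

static const struct my_hw my_hw_table[] = {
	{ .dc_base = 0xc000, .query = my_query_500 },
};

int main(void)
{
	struct my_device dev = { .hw = &my_hw_table[0] };

	/* Runtime code dereferences the descriptor, as in hwdev->hw->map. */
	printf("dc_base=0x%x variant=%d\n", dev.hw->dc_base, dev.hw->query(dev.hw));
	return 0;
}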
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 72a07950167e..33c5ef96ced0 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -57,7 +57,7 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
struct malidp_plane *mp = to_malidp_plane(plane);
if (mp->base.fb)
- drm_framebuffer_unreference(mp->base.fb);
+ drm_framebuffer_put(mp->base.fb);
drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
@@ -186,8 +186,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
fb = state->fb;
- ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
- fb->format->format);
+ ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
+ mp->layer->id,
+ fb->format->format);
if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
@@ -212,7 +213,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
* third plane stride register.
*/
if (ms->n_planes == 3 &&
- !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
+ !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
(state->fb->pitches[1] != state->fb->pitches[2]))
return -EINVAL;
@@ -230,9 +231,9 @@ static int malidp_de_plane_check(struct drm_plane *plane,
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
- val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
- state->crtc_w,
- fb->format->format);
+ val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
+ state->crtc_w,
+ fb->format->format);
if (val < 0)
return val;
@@ -252,7 +253,7 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
return;
if (num_planes == 3)
- num_strides = (mp->hwdev->features &
+ num_strides = (mp->hwdev->hw->features &
MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
for (i = 0; i < num_strides; ++i)
@@ -265,13 +266,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct malidp_plane *mp;
- const struct malidp_hw_regmap *map;
struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
u32 src_w, src_h, dest_w, dest_h, val;
int i;
mp = to_malidp_plane(plane);
- map = &mp->hwdev->map;
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
@@ -364,7 +363,7 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
int malidp_de_planes_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm->dev_private;
- const struct malidp_hw_regmap *map = &malidp->dev->map;
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index a18f156c8b66..ecf25cf9f9f5 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
armada_gem.o armada_overlay.o armada_trace.o
armada-y += armada_510.o
diff --git a/drivers/gpu/drm/armada/armada_trace.c b/drivers/gpu/drm/armada/armada_trace.c
index 068b336ba75f..c64cce325cdf 100644
--- a/drivers/gpu/drm/armada/armada_trace.c
+++ b/drivers/gpu/drm/armada/armada_trace.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "armada_trace.h"
diff --git a/drivers/gpu/drm/armada/armada_trace.h b/drivers/gpu/drm/armada/armada_trace.h
index be245a24610f..8dbfea7a00fe 100644
--- a/drivers/gpu/drm/armada/armada_trace.h
+++ b/drivers/gpu/drm/armada/armada_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(ARMADA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define ARMADA_TRACE_H
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 749646ae365f..4c7375b45281 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/firmware.h>
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h
index 1d9c4e75d303..1e9ac9d6d26c 100644
--- a/drivers/gpu/drm/ast/ast_dram_tables.h
+++ b/drivers/gpu/drm/ast/ast_dram_tables.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef AST_DRAM_TABLES_H
#define AST_DRAM_TABLES_H
diff --git a/drivers/gpu/drm/atmel-hlcdc/Makefile b/drivers/gpu/drm/atmel-hlcdc/Makefile
index bb5f8507a8ce..49dc89f36b73 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Makefile
+++ b/drivers/gpu/drm/atmel-hlcdc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
atmel-hlcdc-dc-y := atmel_hlcdc_crtc.o \
atmel_hlcdc_dc.o \
atmel_hlcdc_output.o \
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 76c490c3cdbc..375bf92cd04f 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/io.h>
#include <linux/console.h>
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index e3d5eb031f18..373eb28f31ed 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
obj-$(CONFIG_DRM_LVDS_ENCODER) += lvds-encoder.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index b4efcbabf7f7..d034b2cb5eee 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -372,9 +372,18 @@ struct adv7511 {
};
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
- unsigned int offset);
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+#else
+static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+ return 0;
+}
#endif
#ifdef CONFIG_DRM_I2C_ADV7533
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index b33d730e4d73..a20a45c0b353 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -300,18 +300,21 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
return 0;
}
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
- unsigned int offset)
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)
- return ret;
+ goto err_cec_parse_dt;
adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
- if (IS_ERR(adv7511->cec_adap))
- return PTR_ERR(adv7511->cec_adap);
+ if (IS_ERR(adv7511->cec_adap)) {
+ ret = PTR_ERR(adv7511->cec_adap);
+ goto err_cec_alloc;
+ }
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
/* cec soft reset */
@@ -329,9 +332,18 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
((adv7511->cec_clk_freq / 750000) - 1) << 2);
ret = cec_register_adapter(adv7511->cec_adap, dev);
- if (ret) {
- cec_delete_adapter(adv7511->cec_adap);
- adv7511->cec_adap = NULL;
- }
- return ret;
+ if (ret)
+ goto err_cec_register;
+ return 0;
+
+err_cec_register:
+ cec_delete_adapter(adv7511->cec_adap);
+ adv7511->cec_adap = NULL;
+err_cec_alloc:
+ dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
+ ret);
+err_cec_parse_dt:
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+ return ret == -EPROBE_DEFER ? ret : 0;
}
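The reworked adv7511_cec_init() above funnels every failure path through goto labels, powers the CEC block down on the way out, and only propagates -EPROBE_DEFER; any other CEC failure is reported and then treated as "CEC disabled" so the rest of the probe can continue. A small userspace-style sketch of that unwind shape, with hypothetical step_a()/step_b() helpers standing in for the real register writes:

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel-only errno, defined here for the sketch */

static int step_a(void) { return 0; }
static int step_b(void) { return -ENOMEM; }
static void power_down_block(void) { puts("optional block powered down"); }

static int init_optional_block(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err_a;

	ret = step_b();
	if (ret)
		goto err_b;

	return 0;

err_b:
	/* Undo whatever step_a() set up before reporting the failure. */
err_a:
	power_down_block();
	/*
	 * Defer probing if a dependency is not ready yet; otherwise treat the
	 * optional feature as disabled and let the caller succeed.
	 */
	return ret == -EPROBE_DEFER ? ret : 0;
}

int main(void)
{
	printf("init_optional_block() = %d\n", init_optional_block());
	return 0;
}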
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 0e14f1572d05..efa29db5fc2b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1084,7 +1084,6 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
struct device *dev = &i2c->dev;
unsigned int main_i2c_addr = i2c->addr << 1;
unsigned int edid_i2c_addr = main_i2c_addr + 4;
- unsigned int offset;
unsigned int val;
int ret;
@@ -1192,24 +1191,16 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (adv7511->type == ADV7511)
adv7511_set_link_config(adv7511, &link_config);
+ ret = adv7511_cec_init(dev, adv7511);
+ if (ret)
+ goto err_unregister_cec;
+
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
drm_bridge_add(&adv7511->bridge);
adv7511_audio_init(dev, adv7511);
-
- offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
-
-#ifdef CONFIG_DRM_I2C_ADV7511_CEC
- ret = adv7511_cec_init(dev, adv7511, offset);
- if (ret)
- goto err_unregister_cec;
-#else
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
- ADV7511_CEC_CTRL_POWER_DOWN);
-#endif
-
return 0;
err_unregister_cec:
diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
index 0903ba574f61..75b0d3f6e4de 100644
--- a/drivers/gpu/drm/bridge/lvds-encoder.c
+++ b/drivers/gpu/drm/bridge/lvds-encoder.c
@@ -13,13 +13,37 @@
#include <linux/of_graph.h>
+struct lvds_encoder {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+};
+
+static int lvds_encoder_attach(struct drm_bridge *bridge)
+{
+ struct lvds_encoder *lvds_encoder = container_of(bridge,
+ struct lvds_encoder,
+ bridge);
+
+ return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
+ bridge);
+}
+
+static struct drm_bridge_funcs funcs = {
+ .attach = lvds_encoder_attach,
+};
+
static int lvds_encoder_probe(struct platform_device *pdev)
{
struct device_node *port;
struct device_node *endpoint;
struct device_node *panel_node;
struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct lvds_encoder *lvds_encoder;
+
+ lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
+ GFP_KERNEL);
+ if (!lvds_encoder)
+ return -ENOMEM;
/* Locate the panel DT node. */
port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
@@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
- if (IS_ERR(bridge))
- return PTR_ERR(bridge);
+ lvds_encoder->panel_bridge =
+ devm_drm_panel_bridge_add(&pdev->dev,
+ panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds_encoder->panel_bridge))
+ return PTR_ERR(lvds_encoder->panel_bridge);
+
+ /* The panel_bridge bridge is attached to the panel's of_node,
+ * but we need a bridge attached to our of_node for our user
+ * to look up.
+ */
+ lvds_encoder->bridge.of_node = pdev->dev.of_node;
+ lvds_encoder->bridge.funcs = &funcs;
+ drm_bridge_add(&lvds_encoder->bridge);
- platform_set_drvdata(pdev, bridge);
+ platform_set_drvdata(pdev, lvds_encoder);
return 0;
}
static int lvds_encoder_remove(struct platform_device *pdev)
{
- struct drm_bridge *bridge = platform_get_drvdata(pdev);
+ struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
- drm_bridge_remove(bridge);
+ drm_bridge_remove(&lvds_encoder->bridge);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
index fd1f745c6073..63b5756f463b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-audio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef DW_HDMI_AUDIO_H
#define DW_HDMI_AUDIO_H
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index b172139502d6..a38db40ce990 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -138,6 +138,7 @@ struct dw_hdmi {
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ struct clk *cec_clk;
struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
@@ -2382,6 +2383,26 @@ __dw_hdmi_probe(struct platform_device *pdev,
goto err_isfr;
}
+ hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
+ if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
+ hdmi->cec_clk = NULL;
+ } else if (IS_ERR(hdmi->cec_clk)) {
+ ret = PTR_ERR(hdmi->cec_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
+ ret);
+
+ hdmi->cec_clk = NULL;
+ goto err_iahb;
+ } else {
+ ret = clk_prepare_enable(hdmi->cec_clk);
+ if (ret) {
+ dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
+ ret);
+ goto err_iahb;
+ }
+ }
+
/* Product and revision IDs */
hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8)
| (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0);
@@ -2518,6 +2539,8 @@ err_iahb:
cec_notifier_put(hdmi->cec_notifier);
clk_disable_unprepare(hdmi->iahb_clk);
+ if (hdmi->cec_clk)
+ clk_disable_unprepare(hdmi->cec_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
err_res:
@@ -2541,6 +2564,8 @@ static void __dw_hdmi_remove(struct dw_hdmi *hdmi)
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
+ if (hdmi->cec_clk)
+ clk_disable_unprepare(hdmi->cec_clk);
if (hdmi->i2c)
i2c_del_adapter(&hdmi->i2c->adap);
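The dw-hdmi hunk treats the "cec" clock as optional: devm_clk_get() returning -ENOENT means the clock simply is not described, any other error (including -EPROBE_DEFER) aborts the probe, and a clock that was obtained gets enabled and later disabled in both the error and remove paths. A sketch of that decision flow, with a hypothetical get_clock() standing in for devm_clk_get():

#include <errno.h>
#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel-only errno, defined here for the sketch */

struct clk;			/* opaque handle, as in the kernel */

/* Hypothetical lookup; pretend the optional clock is absent. */
static int get_clock(const char *name, struct clk **out)
{
	(void)name;
	*out = NULL;
	return -ENOENT;
}

static int probe(void)
{
	struct clk *cec_clk;
	int ret = get_clock("cec", &cec_clk);

	if (ret == -ENOENT) {
		cec_clk = NULL;			/* optional: carry on without it */
	} else if (ret < 0) {
		if (ret != -EPROBE_DEFER)
			fprintf(stderr, "cannot get cec clock: %d\n", ret);
		return ret;			/* real failure or deferral */
	} else {
		/* clk_prepare_enable() would go here; undo it on later errors */
	}

	printf("probe ok, cec clock %s\n", cec_clk ? "present" : "absent");
	return 0;
}

int main(void) { return probe() ? 1 : 0; }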
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index e79fbc1b7704..08ab7d6aea65 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -99,7 +99,7 @@
#define DP0_ACTIVEVAL 0x0650
#define DP0_SYNCVAL 0x0654
#define DP0_MISC 0x0658
-#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
+#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
#define BPC_6 (0 << 5)
#define BPC_8 (1 << 5)
@@ -320,7 +320,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
tmp = (tmp << 8) | buf[i];
i++;
if (((i % 4) == 0) || (i == size)) {
- tc_write(DP0_AUXWDATA(i >> 2), tmp);
+ tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
tmp = 0;
}
}
@@ -605,8 +605,15 @@ static int tc_get_display_props(struct tc_data *tc)
ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
if (ret < 0)
goto err_dpcd_read;
- if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
- goto err_dpcd_inval;
+ if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+ dev_dbg(tc->dev, "Falling back to 2.7 Gbps rate\n");
+ tc->link.base.rate = 270000;
+ }
+
+ if (tc->link.base.num_lanes > 2) {
+ dev_dbg(tc->dev, "Falling back to 2 lanes\n");
+ tc->link.base.num_lanes = 2;
+ }
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
if (ret < 0)
@@ -639,9 +646,6 @@ static int tc_get_display_props(struct tc_data *tc)
err_dpcd_read:
dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
return ret;
-err_dpcd_inval:
- dev_err(tc->dev, "invalid DPCD\n");
- return -EINVAL;
}
static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
@@ -657,6 +661,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
int lower_margin = mode->vsync_start - mode->vdisplay;
int vsync_len = mode->vsync_end - mode->vsync_start;
+ /*
+ * Recommended maximum number of symbols transferred in a transfer unit:
+ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+ * (output active video bandwidth in bytes))
+ * Must be less than tu_size.
+ */
+ max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
dev_dbg(tc->dev, "set mode %dx%d\n",
mode->hdisplay, mode->vdisplay);
dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
@@ -666,13 +678,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
- /* LCD Ctl Frame Size */
- tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+ /*
+ * LCD Ctl Frame Size
+ * The datasheet is not clear about VSDELAY in the DPI case;
+ * assume we do not need any delay when DPI is the source of the
+ * sync signals.
+ */
+ tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
- tc_write(HTIM01, (left_margin << 16) | /* H back porch */
- (hsync_len << 0)); /* Hsync */
- tc_write(HTIM02, (right_margin << 16) | /* H front porch */
- (mode->hdisplay << 0)); /* width */
+ tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
+ (ALIGN(hsync_len, 2) << 0)); /* Hsync */
+ tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
+ (ALIGN(mode->hdisplay, 2) << 0)); /* width */
tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
(vsync_len << 0)); /* Vsync */
tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
@@ -691,7 +708,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
/* DP Main Stream Attributes */
vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
tc_write(DP0_VIDSYNCDELAY,
- (0x003e << 16) | /* thresh_dly */
+ (max_tu_symbol << 16) | /* thresh_dly */
(vid_sync_dly << 0));
tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
@@ -707,14 +724,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
- /*
- * Recommended maximum number of symbols transferred in a transfer unit:
- * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
- * (output active video bandwidth in bytes))
- * Must be less than tu_size.
- */
- max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
- tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+ tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+ BPC_8);
return 0;
err:
@@ -810,8 +821,6 @@ static int tc_main_link_setup(struct tc_data *tc)
unsigned int rate;
u32 dp_phy_ctrl;
int timeout;
- bool aligned;
- bool ready;
u32 value;
int ret;
u8 tmp[8];
@@ -956,16 +965,15 @@ static int tc_main_link_setup(struct tc_data *tc)
ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
if (ret < 0)
goto err_dpcd_read;
- ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
- DP_CHANNEL_EQ_BITS)); /* Lane0 */
- aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
- } while ((--timeout) && !(ready && aligned));
+ } while ((--timeout) &&
+ !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
if (timeout == 0) {
/* Read DPCD 0x200-0x201 */
ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
if (ret < 0)
goto err_dpcd_read;
+ dev_err(dev, "channel(s) EQ not ok\n");
dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
tmp[1]);
@@ -976,10 +984,6 @@ static int tc_main_link_setup(struct tc_data *tc)
dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
tmp[6]);
- if (!ready)
- dev_err(dev, "Lane0/1 not ready\n");
- if (!aligned)
- dev_err(dev, "Lane0/1 not aligned\n");
return -EAGAIN;
}
@@ -1101,7 +1105,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
static int tc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- /* Accept any mode */
+ /* DPI interface clock limitation: up to 154 MHz */
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
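Two related tc358767.c fixes above: TU_SIZE_RECOMMENDED is now the plain value 63 (the old define already contained the << 16 field shift, so deriving max_tu_symbol from it produced a value far outside its field), and DP0_MISC is assembled by shifting each field into place explicitly. A quick arithmetic check of the packing, under the assumption that the field positions are bit 23 (MAX_TU_SYMBOL), bit 16 (TU_SIZE) and the BPC bits as defined in the driver:

#include <stdint.h>
#include <stdio.h>

#define TU_SIZE_RECOMMENDED	63		/* LSCLK cycles per TU */
#define BPC_8			(1u << 5)

int main(void)
{
	uint32_t max_tu_symbol = TU_SIZE_RECOMMENDED - 1;	/* must stay below tu_size */

	/* New packing: each field shifted into its own position. */
	uint32_t dp0_misc = (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) | BPC_8;

	/*
	 * Old, buggy derivation: TU_SIZE_RECOMMENDED used to be (0x3f << 16),
	 * so max_tu_symbol = (0x3f << 16) - 1 landed far outside a 6-bit field.
	 */
	uint32_t old_max_tu_symbol = (0x3f << 16) - 1;

	printf("DP0_MISC   = 0x%08x\n", dp0_misc);	/* 0x1f3f0020 */
	printf("old max_tu = 0x%08x (nonsense for a 6-bit field)\n", old_max_tu_symbol);
	return 0;
}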
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2f80377101a1..ab4032167094 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1325,7 +1325,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
return;
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!new_crtc_state->active || !new_crtc_state->planes_changed)
+ if (!new_crtc_state->active)
continue;
ret = drm_crtc_vblank_get(crtc);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 704fc8934616..25f4b2e9a44f 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -234,6 +234,10 @@ int drm_connector_init(struct drm_device *dev,
config->link_status_property,
0);
+ drm_object_attach_property(&connector->base,
+ config->non_desktop_property,
+ 0);
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
}
@@ -763,6 +767,10 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
* value of link-status is "GOOD". If something fails during or after modeset,
* the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers
* should update this value using drm_mode_connector_set_link_status_property().
+ * non_desktop:
+ * Indicates the output should be ignored for purposes of displaying a
+ * standard desktop environment or console. This is most likely because
+ * the output device is not rectilinear.
*
* Connectors also have one standardized atomic property:
*
@@ -811,6 +819,11 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.link_status_property = prop;
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.non_desktop_property = prop;
+
return 0;
}
@@ -1194,6 +1207,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
if (edid)
size = EDID_LENGTH * (1 + edid->extensions);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.non_desktop_property,
+ connector->display_info.non_desktop);
+
ret = drm_property_replace_global_blob(dev,
&connector->edid_blob_ptr,
size,
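The new immutable "non-desktop" connector property documented above is meant to let compositors skip outputs such as HMDs. A hedged libdrm sketch of how userspace might read it, assuming an accessible /dev/dri/card0 and that the property name matches the one registered here; this is illustrative only, not code from this series:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	drmModeRes *res = drmModeGetResources(fd);

	for (int i = 0; res && i < res->count_connectors; i++) {
		uint32_t cid = res->connectors[i];
		drmModeObjectProperties *props =
			drmModeObjectGetProperties(fd, cid, DRM_MODE_OBJECT_CONNECTOR);

		for (uint32_t j = 0; props && j < props->count_props; j++) {
			drmModePropertyRes *p = drmModeGetProperty(fd, props->props[j]);

			if (p && !strcmp(p->name, "non-desktop"))
				printf("connector %u non-desktop=%llu\n", cid,
				       (unsigned long long)props->prop_values[j]);
			drmModeFreeProperty(p);
		}
		drmModeFreeObjectProperties(props);
	}
	drmModeFreeResources(res);
	if (fd >= 0)
		close(fd);
	return 0;
}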
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index d34e5096887a..053044201e31 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -263,12 +263,6 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_aux(struct drm_dp_aux *aux)
return aux_dev;
}
-static int auxdev_wait_atomic_t(atomic_t *p)
-{
- schedule();
- return 0;
-}
-
void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
{
struct drm_dp_aux_dev *aux_dev;
@@ -283,7 +277,7 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
mutex_unlock(&aux_idr_mutex);
atomic_dec(&aux_dev->usecount);
- wait_on_atomic_t(&aux_dev->usecount, auxdev_wait_atomic_t,
+ wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait,
TASK_UNINTERRUPTIBLE);
minor = aux_dev->index;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9ada0ccf50df..524eace3d460 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -82,6 +82,8 @@
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
+/* Non desktop display (i.e. HMD) */
+#define EDID_QUIRK_NON_DESKTOP (1 << 12)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -157,6 +159,9 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+
+ /* HTC Vive VR Headset */
+ { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
};
/*
@@ -4408,7 +4413,7 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
static void drm_add_display_info(struct drm_connector *connector,
- struct edid *edid)
+ struct edid *edid, u32 quirks)
{
struct drm_display_info *info = &connector->display_info;
@@ -4423,6 +4428,8 @@ static void drm_add_display_info(struct drm_connector *connector,
info->dvi_dual = false;
info->has_hdmi_infoframe = false;
+ info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+
if (edid->revision < 3)
return;
@@ -4647,7 +4654,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
* To avoid multiple parsing of same block, lets parse that map
* from sink info, before parsing CEA modes.
*/
- drm_add_display_info(connector, edid);
+ drm_add_display_info(connector, edid, quirks);
/*
* EDID spec says modes should be preferred in this order:
@@ -4844,7 +4851,8 @@ void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range,
- bool rgb_quant_range_selectable)
+ bool rgb_quant_range_selectable,
+ bool is_hdmi2_sink)
{
/*
* CEA-861:
@@ -4868,8 +4876,15 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
* YQ-field to match the RGB Quantization Range being transmitted
* (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
* set YQ=1) and the Sink shall ignore the YQ-field."
+ *
+ * Unfortunately, certain sinks (e.g. VIZ Model 67/E261VA) get confused
+ * by non-zero YQ when receiving RGB. There doesn't seem to be any
+ * good way to tell which version of CEA-861 the sink supports, so
+ * we limit non-zero YQ to HDMI 2.0 sinks only, as HDMI 2.0 is based
+ * on CEA-861-F.
*/
- if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+ if (!is_hdmi2_sink ||
+ rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
frame->ycc_quantization_range =
HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
else
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index defd6a76fef3..09919e8d67f9 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1824,6 +1824,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
DRM_INFO("Cannot find any crtc or sizes\n");
+
+ /* First time: disable all CRTCs. */
+ if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
+ restore_fbdev_mode(fb_helper);
return -EAGAIN;
}
@@ -2048,6 +2052,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
+ if (connector->display_info.non_desktop)
+ return false;
+
if (strict)
enable = connector->status == connector_status_connected;
else
@@ -2067,7 +2074,8 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
+ connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
+
any_enabled |= enabled[i];
}
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 19404e34cd59..37a93cdffb4a 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -1030,6 +1030,7 @@ retry:
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
e->event.vbl.user_data = page_flip->user_data;
+ e->event.vbl.crtc_id = crtc->base.id;
ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
if (ret) {
kfree(e);
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
index 16c64d067e67..baccc63db106 100644
--- a/drivers/gpu/drm/drm_trace.h
+++ b/drivers/gpu/drm/drm_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_DRM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _DRM_TRACE_H_
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 2c68909637b5..32d9bcf5be7f 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -367,9 +367,9 @@ void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
-static void vblank_disable_fn(unsigned long arg)
+static void vblank_disable_fn(struct timer_list *t)
{
- struct drm_vblank_crtc *vblank = (void *)arg;
+ struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
@@ -436,8 +436,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
vblank->dev = dev;
vblank->pipe = i;
init_waitqueue_head(&vblank->queue);
- setup_timer(&vblank->disable_timer, vblank_disable_fn,
- (unsigned long)vblank);
+ timer_setup(&vblank->disable_timer, vblank_disable_fn, 0);
seqlock_init(&vblank->seqlock);
}
@@ -1021,7 +1020,7 @@ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
if (drm_vblank_offdelay == 0)
return;
else if (drm_vblank_offdelay < 0)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
else if (!dev->vblank_disable_immediate)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
@@ -1652,7 +1651,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev->event_lock, irqflags);
if (disable_irq)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
return true;
}
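This hunk, like the later etnaviv, exynos vidi and tda998x ones, converts timers from setup_timer() with an unsigned long cookie to timer_setup() plus from_timer(), which recovers the containing structure with container_of() instead of a cast. A compact, self-contained illustration of the from_timer() idea; the kernel's timer_list is stubbed so the snippet builds on its own (it relies on the GNU typeof extension, as the kernel macro does):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct timer_list: only the callback pointer matters here. */
struct timer_list {
	void (*function)(struct timer_list *t);
};

/* from_timer(var, timer_ptr, field) is container_of() in the kernel, too. */
#define from_timer(var, timer_ptr, field) \
	container_of(timer_ptr, typeof(*var), field)

struct my_crtc {
	int pipe;
	struct timer_list disable_timer;	/* embedded, like drm_vblank_crtc */
};

static void disable_fn(struct timer_list *t)
{
	struct my_crtc *crtc = from_timer(crtc, t, disable_timer);

	printf("disabling pipe %d\n", crtc->pipe);
}

int main(void)
{
	struct my_crtc crtc = { .pipe = 2 };

	crtc.disable_timer.function = disable_fn;		/* what timer_setup() records */
	crtc.disable_timer.function(&crtc.disable_timer);	/* what the timer core does on expiry */
	return 0;
}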
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index 15c3bfa89a79..1281c8d4fae5 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
etnaviv-y := \
etnaviv_buffer.o \
etnaviv_cmd_parser.o \
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5884ab623e0a..daee3f1196df 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -760,7 +760,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
up_read(&mm->mmap_sem);
if (ret < 0) {
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
return ERR_PTR(ret);
}
@@ -833,7 +833,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
}
}
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
work = kmalloc(sizeof(*work), GFP_KERNEL);
@@ -867,7 +867,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
if (etnaviv_obj->pages) {
int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- release_pages(etnaviv_obj->pages, npages, 0);
+ release_pages(etnaviv_obj->pages, npages);
kvfree(etnaviv_obj->pages);
}
put_task_struct(etnaviv_obj->userptr.task);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 8197e1d6ed11..e19cbe05da2a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -968,9 +968,9 @@ static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
}
-static void hangcheck_handler(unsigned long data)
+static void hangcheck_handler(struct timer_list *t)
{
- struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
+ struct etnaviv_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
u32 fence = gpu->completed_fence;
bool progress = false;
@@ -1765,8 +1765,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
INIT_WORK(&gpu->recover_work, recover_worker);
init_waitqueue_head(&gpu->fence_event);
- setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler,
- (unsigned long)gpu);
+ timer_setup(&gpu->hangcheck_timer, hangcheck_handler, TIMER_DEFERRABLE);
priv->gpu[priv->num_gpus++] = gpu;
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
index 368218304566..c27c1484cfa9 100644
--- a/drivers/gpu/drm/etnaviv/state.xml.h
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_XML
#define STATE_XML
diff --git a/drivers/gpu/drm/etnaviv/state_3d.xml.h b/drivers/gpu/drm/etnaviv/state_3d.xml.h
index d7146fd13943..73a97d35c51b 100644
--- a/drivers/gpu/drm/etnaviv/state_3d.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_3d.xml.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_3D_XML
#define STATE_3D_XML
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 43c73e2ed34f..60808daf7e8d 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_HI_XML
#define STATE_HI_XML
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index f663490e949d..bdf4212dde7b 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 53e03f8af3d5..e6b0940b1ac2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -161,9 +161,9 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
.atomic_flush = exynos_crtc_handle_event,
};
-static void vidi_fake_vblank_timer(unsigned long arg)
+static void vidi_fake_vblank_timer(struct timer_list *t)
{
- struct vidi_context *ctx = (void *)arg;
+ struct vidi_context *ctx = from_timer(ctx, t, timer);
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
@@ -449,7 +449,7 @@ static int vidi_probe(struct platform_device *pdev)
ctx->pdev = pdev;
- setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx);
+ timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
mutex_init(&ctx->lock);
diff --git a/drivers/gpu/drm/fsl-dcu/Makefile b/drivers/gpu/drm/fsl-dcu/Makefile
index aca34f656bea..b55c4482d0f9 100644
--- a/drivers/gpu/drm/fsl-dcu/Makefile
+++ b/drivers/gpu/drm/fsl-dcu/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
fsl-dcu-drm-y := fsl_dcu_drm_drv.o \
fsl_dcu_drm_kms.o \
fsl_dcu_drm_rgb.o \
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 58e9e0601a61..faf17b83b910 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
return PTR_ERR(fsl_dev->state);
}
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_disable_unprepare(fsl_dev->clk);
return 0;
@@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
if (fsl_dev->tcon)
fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
+ enable_irq(fsl_dev->irq);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
console_lock();
@@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
console_unlock();
drm_kms_helper_poll_enable(fsl_dev->drm);
- enable_irq(fsl_dev->irq);
return 0;
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index edd7d8127d19..c54806d08dd7 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
{
struct drm_encoder *encoder = &fsl_dev->encoder;
struct drm_connector *connector = &fsl_dev->connector.base;
- struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
int ret;
fsl_dev->connector.encoder = encoder;
@@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
if (ret < 0)
goto err_sysfs;
- drm_object_property_set_value(&connector->base,
- mode_config->dpms_property,
- DRM_MODE_DPMS_OFF);
-
ret = drm_panel_attach(panel, connector);
if (ret) {
dev_err(fsl_dev->dev, "failed to attach panel\n");
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index c1c8dc18aa53..c8f2c89be99d 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# KMS driver for the GMA500
#
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index a77acfc1852e..b20100c18ffb 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 60981505763c..cd3f0873bbdd 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -601,9 +601,9 @@ tda998x_reset(struct tda998x_priv *priv)
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
-static void tda998x_edid_delay_done(unsigned long data)
+static void tda998x_edid_delay_done(struct timer_list *t)
{
- struct tda998x_priv *priv = (struct tda998x_priv *)data;
+ struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
priv->edid_delay_active = false;
wake_up(&priv->edid_delay_waitq);
@@ -1491,8 +1491,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
mutex_init(&priv->mutex); /* protect the page access */
init_waitqueue_head(&priv->edid_delay_waitq);
- setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
- (unsigned long)priv);
+ timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
INIT_WORK(&priv->detect_work, tda998x_detect_work);
/* wake up the device: */
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6c3b0481ef82..2acf3b3c5f9d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index f5486cb94818..2641ba510a61 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 701a3c6f1669..85d4c57870fb 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1628,7 +1628,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0;
- uint32_t bb_size;
+ int bb_size;
void *dst = NULL;
int ret = 0;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 3c318439a659..355120865efd 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -282,6 +282,7 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -307,6 +308,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
port->type = type;
emulate_monitor_status_change(vgpu);
+ vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 4427be18e4a9..940cdaaa3f24 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
goto err_unpin_mm;
}
+ ret = intel_gvt_generate_request(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to generate request\n");
+ goto err_unpin_mm;
+ }
+
ret = prepare_shadow_batch_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 2801d70579d8..8e331142badb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -311,9 +311,9 @@ static inline int gtt_set_entry64(void *pt,
#define GTT_HAW 46
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
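The gvt/gtt.c hunk narrows the address masks: with a 46-bit host address width, the field above a 30-bit (1G) page offset spans bits 45..30, i.e. GTT_HAW - 30 = 16 bits, so the old "GTT_HAW - 30 + 1" variants reached one bit too far. A quick width check, assuming GTT_HAW = 46 as in the file and a 64-bit unsigned long:

#include <stdio.h>

#define GTT_HAW 46

#define ADDR_1G_MASK_OLD	(((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
#define ADDR_1G_MASK_NEW	(((1UL << (GTT_HAW - 30)) - 1) << 30)

int main(void)
{
	/* New mask covers bits 45..30; the old one wrongly included bit 46. */
	printf("old: 0x%016lx\n", ADDR_1G_MASK_OLD);	/* 0x00007fffc0000000 */
	printf("new: 0x%016lx\n", ADDR_1G_MASK_NEW);	/* 0x00003fffc0000000 */
	return 0;
}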
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a5bed2e71b92..44cd5ff5e97d 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1381,40 +1381,6 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
}
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 v = *(u32 *)p_data;
-
- if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
- return intel_vgpu_default_mmio_write(vgpu,
- offset, p_data, bytes);
-
- switch (offset) {
- case 0x4ddc:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
- break;
- case 0x42080:
- /* bypass WaCompressedResourceDisplayNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
- break;
- case 0xe194:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
- break;
- case 0x7014:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -1671,8 +1637,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2564,8 +2530,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2615,8 +2580,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
- MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
MMIO_D(0x45504, D_SKL_PLUS);
MMIO_D(0x45520, D_SKL_PLUS);
MMIO_D(0x46000, D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f6ded475bb2c..3ac1dc97a7a0 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -140,9 +140,10 @@ static int shadow_context_status_change(struct notifier_block *nb,
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id ring_id = req->engine->id;
struct intel_vgpu_workload *workload;
+ unsigned long flags;
if (!is_gvt_request(req)) {
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
@@ -150,7 +151,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
return NOTIFY_OK;
}
@@ -161,7 +162,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
@@ -170,7 +171,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
ring_id, workload->vgpu->id);
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
@@ -253,7 +254,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret;
@@ -299,6 +299,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
ret = populate_shadow_context(workload);
if (ret)
goto err_unpin;
+ workload->shadowed = true;
+ return 0;
+
+err_unpin:
+ engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+ return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+ struct drm_i915_gem_request *rq;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ int ret;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
@@ -313,14 +333,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
ret = copy_workload_to_ring_buffer(workload);
if (ret)
goto err_unpin;
- workload->shadowed = true;
return 0;
err_unpin:
engine->context_unpin(engine, shadow_ctx);
-err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2d694f6c0907..b9f872204d7e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3db5851756f0..2cf10d17acfb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1325,7 +1325,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
* becaue the HDA driver may require us to enable the audio power
* domain during system suspend.
*/
- pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME;
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
ret = i915_driver_init_early(dev_priv, ent);
if (ret < 0)
@@ -1714,6 +1714,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_guc_resume(dev_priv);
intel_modeset_init_hw(dev);
+ intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
@@ -2618,6 +2619,8 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
}
+ intel_uncore_runtime_resume(dev_priv);
+
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 94b23fcbc989..3a140eedfc83 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2267,8 +2267,10 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
struct radix_tree_iter iter;
void __rcu **slot;
+ rcu_read_lock();
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+ rcu_read_unlock();
}
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e304dcbc6042..f782cf2069c1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -104,12 +104,14 @@ static void lut_close(struct i915_gem_context *ctx)
kmem_cache_free(ctx->i915->luts, lut);
}
+ rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
__i915_gem_object_release_unless_active(vma->obj);
}
+ rcu_read_unlock();
}
static void i915_gem_context_free(struct i915_gem_context *ctx)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3d7190764f10..435ed95df144 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -343,6 +343,10 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
(vma->node.start + vma->node.size - 1) >> 32)
return true;
+ if (flags & __EXEC_OBJECT_NEEDS_MAP &&
+ !i915_vma_is_map_and_fenceable(vma))
+ return true;
+
return false;
}
@@ -2100,6 +2104,11 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err;
}
+ if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
+ err = -EINVAL;
+ goto err;
+ }
+
syncobj = drm_syncobj_find(file, fence.handle);
if (!syncobj) {
DRM_DEBUG("Invalid syncobj handle provided\n");
@@ -2107,6 +2116,9 @@ get_fence_array(struct drm_i915_gem_execbuffer2 *args,
goto err;
}
+ BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
+ ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
+
fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
}
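The new BUILD_BUG_ON verifies at compile time that every defined execbuffer fence flag fits into the low pointer bits that kmalloc's minimum alignment leaves free, because ptr_pack_bits() stashes fence.flags in the bottom two bits of the syncobj pointer. A stand-alone sketch of that general technique in plain C, with hypothetical pack/unpack helpers rather than i915's ptr_pack_bits()/ptr_unpack_bits():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_BITS 2                        /* low bits borrowed from the pointer */
#define FLAG_MASK ((uintptr_t)((1u << FLAG_BITS) - 1))

/* Pack small flags into the alignment bits of a suitably aligned pointer. */
static void *pack_ptr_bits(void *ptr, unsigned int flags)
{
        uintptr_t v = (uintptr_t)ptr;

        assert((v & FLAG_MASK) == 0);          /* pointer must be aligned */
        assert((flags & ~FLAG_MASK) == 0);     /* flags must fit the spare bits */
        return (void *)(v | flags);
}

static void *unpack_ptr_bits(void *packed, unsigned int *flags)
{
        uintptr_t v = (uintptr_t)packed;

        *flags = v & FLAG_MASK;
        return (void *)(v & ~FLAG_MASK);
}

int main(void)
{
        int *obj = malloc(sizeof(*obj));   /* malloc alignment leaves the low bits clear */
        unsigned int flags;
        void *packed = pack_ptr_bits(obj, 0x2);
        int *back = unpack_ptr_bits(packed, &flags);

        printf("flags=%u, pointer preserved=%d\n", flags, back == obj);
        free(obj);
        return 0;
}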
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5eaa6893daaa..2af65ecf2df8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -958,10 +958,14 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
}
}
-struct sgt_dma {
+static inline struct sgt_dma {
struct scatterlist *sg;
dma_addr_t dma, max;
-};
+} sgt_dma(struct i915_vma *vma) {
+ struct scatterlist *sg = vma->pages->sgl;
+ dma_addr_t addr = sg_dma_address(sg);
+ return (struct sgt_dma) { sg, addr, addr + sg->length };
+}
struct gen8_insert_pte {
u16 pml4e;
@@ -1042,11 +1046,7 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = {
- .sg = vma->pages->sgl,
- .dma = sg_dma_address(iter.sg),
- .max = iter.dma + iter.sg->length,
- };
+ struct sgt_dma iter = sgt_dma(vma);
struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
@@ -1158,11 +1158,7 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sgt_dma iter = {
- .sg = vma->pages->sgl,
- .dma = sg_dma_address(iter.sg),
- .max = iter.dma + iter.sg->length,
- };
+ struct sgt_dma iter = sgt_dma(vma);
struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
@@ -1869,13 +1865,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
- struct sgt_dma iter;
+ struct sgt_dma iter = sgt_dma(vma);
gen6_pte_t *vaddr;
vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
- iter.sg = vma->pages->sgl;
- iter.dma = sg_dma_address(iter.sg);
- iter.max = iter.dma + iter.sg->length;
do {
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -2107,7 +2100,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
INIT_LIST_HEAD(&vm->unbound_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
- pagevec_init(&vm->free_pages, false);
+ pagevec_init(&vm->free_pages);
}
static void i915_address_space_fini(struct i915_address_space *vm)
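The sgt_dma() change above folds three identical open-coded initialisers into one small constructor that returns the iterator by value; the removed initialisers also referenced iter.sg from inside iter's own designated initialiser, which is exactly the kind of subtlety a single helper avoids. A minimal stand-alone illustration of the same shape, using a made-up iterator type rather than the i915 structures:

#include <stddef.h>
#include <stdio.h>

struct range_iter {
        const int *cur;     /* current element */
        size_t remaining;   /* elements left to visit */
};

/* Build the iterator in one place instead of repeating the initialiser. */
static inline struct range_iter range_iter_init(const int *array, size_t len)
{
        return (struct range_iter){ .cur = array, .remaining = len };
}

int main(void)
{
        static const int vals[] = { 1, 2, 3 };

        for (struct range_iter it = range_iter_init(vals, 3);
             it.remaining; it.cur++, it.remaining--)
                printf("%d\n", *it.cur);
        return 0;
}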
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e26b23171b56..382a77a1097e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -172,7 +172,9 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT_CACHED;
- mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+ mn->wq = alloc_workqueue("i915-userptr-release",
+ WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 0);
if (mn->wq == NULL) {
kfree(mn);
return ERR_PTR(-ENOMEM);
@@ -547,7 +549,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
mutex_unlock(&obj->mm.lock);
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
i915_gem_object_put(obj);
@@ -660,7 +662,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
__i915_gem_userptr_set_active(obj, true);
if (IS_ERR(pages))
- release_pages(pvec, pinned, 0);
+ release_pages(pvec, pinned);
kvfree(pvec);
return PTR_ERR_OR_ZERO(pages);
@@ -827,7 +829,7 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
dev_priv->mm.userptr_wq =
alloc_workqueue("i915-userptr-acquire",
- WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ WQ_HIGHPRI | WQ_UNBOUND,
0);
if (!dev_priv->mm.userptr_wq)
return -ENOMEM;
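The userptr hunks above move WQ_MEM_RECLAIM from the acquire workqueue to the release workqueue: a queue that must keep making forward progress while the system is reclaiming memory needs WQ_MEM_RECLAIM so it is backed by a rescuer thread, while queues used only on non-reclaim paths should not claim it. A short sketch of allocating such a queue, with a hypothetical name and init function around the alloc_workqueue() API:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_release_wq;

static int my_init_workqueues(void)
{
        /*
         * Unbound so the work may run on any CPU, and WQ_MEM_RECLAIM
         * because this queue can be flushed from the shrinker path and
         * therefore needs a rescuer thread to guarantee progress.
         */
        my_release_wq = alloc_workqueue("my-release",
                                        WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        return my_release_wq ? 0 : -ENOMEM;
}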
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c
index e2993857df37..888b7d3f04c3 100644
--- a/drivers/gpu/drm/i915/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/i915_gemfs.c
@@ -52,7 +52,8 @@ int i915_gemfs_init(struct drm_i915_private *i915)
if (has_transparent_hugepage()) {
struct super_block *sb = gemfs->mnt_sb;
- char options[] = "huge=within_size";
+ /* FIXME: Disabled until we get W/A for read BW issue. */
+ char options[] = "huge=never";
int flags = 0;
int err;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 9cab91ddeb79..4e76768ffa95 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _I915_TRACE_H_
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index f1df2bd4ecf4..463a7177997c 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright © 2009 Intel Corporation
*
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 42fb436f6cdc..d1abf4bb7c81 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel ACPI functions
*
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 48e1ba01ccf8..5f8b9f1f40f1 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -517,6 +517,7 @@ static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
rb_erase(&wait->node, &b->waiters);
+ RB_CLEAR_NODE(&wait->node);
out:
GEM_BUG_ON(b->irq_wait == wait);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 053ae6d3d339..e0fffd883b54 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15198,6 +15198,23 @@ void intel_connector_unregister(struct drm_connector *connector)
intel_panel_destroy_backlight(connector);
}
+static void intel_hpd_poll_fini(struct drm_device *dev)
+{
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ /* First disable polling... */
+ drm_kms_helper_poll_fini(dev);
+
+ /* Then kill the work that may have been queued by hpd. */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ if (connector->modeset_retry_work.func)
+ cancel_work_sync(&connector->modeset_retry_work);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -15218,7 +15235,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
*/
- drm_kms_helper_poll_fini(dev);
+ intel_hpd_poll_fini(dev);
/* poll work can call into fbdev, hence clean that up afterwards */
intel_fbdev_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d11281004109..65260fb35d2a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3735,9 +3735,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
}
- /* Read the eDP Display control capabilities registers */
- if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ /*
+ * Read the eDP display control registers.
+ *
+ * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+ * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
+ * set, but require eDP 1.4+ detection (e.g. for supported link rates
+ * method). The display control registers should read zero if they're
+ * not supported anyway.
+ */
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 47d022d48718..6c7f8bca574e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -499,7 +499,6 @@ struct intel_crtc_scaler_state {
struct intel_pipe_wm {
struct intel_wm_level wm[5];
- struct intel_wm_level raw_wm[5];
uint32_t linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
@@ -1737,7 +1736,7 @@ extern struct drm_display_mode *intel_find_panel_downclock(
int intel_backlight_device_register(struct intel_connector *connector);
void intel_backlight_device_unregister(struct intel_connector *connector);
#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
{
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b8af35187d22..ea96682568e8 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -697,10 +697,8 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
/* Due to peculiar init order wrt to hpd handling this is separate. */
if (drm_fb_helper_initial_config(&ifbdev->helper,
- ifbdev->preferred_bpp)) {
+ ifbdev->preferred_bpp))
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
- intel_fbdev_fini(to_i915(ifbdev->helper.dev));
- }
}
void intel_fbdev_initial_config_async(struct drm_device *dev)
@@ -800,7 +798,11 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
- if (ifbdev)
+ if (!ifbdev)
+ return;
+
+ intel_fbdev_sync(ifbdev);
+ if (ifbdev->vma)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 59247a49a077..e039702c1907 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -487,7 +487,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
- intel_hdmi->rgb_quant_range_selectable);
+ intel_hdmi->rgb_quant_range_selectable,
+ is_hdmi2_sink);
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index eb5827110d8f..49fdf09f9919 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -438,7 +438,9 @@ static bool
gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
{
return (i + 1 < num &&
- !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ msgs[i].addr == msgs[i + 1].addr &&
+ !(msgs[i].flags & I2C_M_RD) &&
+ (msgs[i].len == 1 || msgs[i].len == 2) &&
(msgs[i + 1].flags & I2C_M_RD));
}
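gmbus_is_index_read() now additionally requires that the write and read halves target the same slave address and that the index write is exactly one or two bytes, which matches the shape of a combined indexed-read transfer. A sketch of how an I2C client typically builds such a message pair (struct i2c_msg and i2c_transfer() are the regular kernel API; the device address and register value are made up):

#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/kernel.h>

/*
 * Read one byte from register 'reg' of the device at 'addr' using a
 * combined write-then-read transfer - the pattern the GMBUS index-read
 * fast path tries to recognise.
 */
static int indexed_read_byte(struct i2c_adapter *adap, u16 addr,
                             u8 reg, u8 *val)
{
        struct i2c_msg msgs[] = {
                {       /* 1-byte index write, no I2C_M_RD */
                        .addr = addr,
                        .flags = 0,
                        .len = 1,
                        .buf = &reg,
                },
                {       /* read from the same slave address */
                        .addr = addr,
                        .flags = I2C_M_RD,
                        .len = 1,
                        .buf = val,
                },
        };
        int ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));

        if (ret < 0)
                return ret;
        return ret == ARRAY_SIZE(msgs) ? 0 : -EIO;
}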
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aa12a44e9a76..f4a4e9496893 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2721,9 +2721,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_crtc *intel_crtc,
int level,
struct intel_crtc_state *cstate,
- struct intel_plane_state *pristate,
- struct intel_plane_state *sprstate,
- struct intel_plane_state *curstate,
+ const struct intel_plane_state *pristate,
+ const struct intel_plane_state *sprstate,
+ const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -3043,28 +3043,24 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
struct intel_pipe_wm *pipe_wm;
struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane;
- struct intel_plane_state *pristate = NULL;
- struct intel_plane_state *sprstate = NULL;
- struct intel_plane_state *curstate = NULL;
+ struct drm_plane *plane;
+ const struct drm_plane_state *plane_state;
+ const struct intel_plane_state *pristate = NULL;
+ const struct intel_plane_state *sprstate = NULL;
+ const struct intel_plane_state *curstate = NULL;
int level, max_level = ilk_wm_max_level(dev_priv), usable_level;
struct ilk_wm_maximums max;
pipe_wm = &cstate->wm.ilk.optimal;
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- struct intel_plane_state *ps;
-
- ps = intel_atomic_get_existing_plane_state(state,
- intel_plane);
- if (!ps)
- continue;
+ drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
+ const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
- if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
pristate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
+ else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
sprstate = ps;
- else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
+ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
curstate = ps;
}
@@ -3086,11 +3082,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
if (pipe_wm->sprites_scaled)
usable_level = 0;
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
- pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
-
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- pipe_wm->wm[0] = pipe_wm->raw_wm[0];
+ ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
+ pristate, sprstate, curstate, &pipe_wm->wm[0]);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
@@ -3100,8 +3094,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
- for (level = 1; level <= max_level; level++) {
- struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
+ for (level = 1; level <= usable_level; level++) {
+ struct intel_wm_level *wm = &pipe_wm->wm[level];
ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
pristate, sprstate, curstate, wm);
@@ -3111,13 +3105,10 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
* register maximums since such watermarks are
* always invalid.
*/
- if (level > usable_level)
- continue;
-
- if (ilk_validate_wm_level(level, &max, wm))
- pipe_wm->wm[level] = *wm;
- else
- usable_level = level;
+ if (!ilk_validate_wm_level(level, &max, wm)) {
+ memset(wm, 0, sizeof(*wm));
+ break;
+ }
}
return 0;
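The reworked ilk_compute_pipe_wm() walks plane states with drm_atomic_crtc_state_for_each_plane_state(), which visits the planes recorded in the CRTC state's plane_mask and hands back each one's current state, instead of looping over every plane on the CRTC and looking each state up in the top-level atomic state. A condensed, driver-agnostic sketch of that iteration, where pick_primary_state() is a hypothetical consumer:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

/* Return the primary plane's state bound to this CRTC state, if any. */
static const struct drm_plane_state *
pick_primary_state(struct drm_crtc_state *crtc_state)
{
        const struct drm_plane_state *plane_state;
        struct drm_plane *plane;

        drm_atomic_crtc_state_for_each_plane_state(plane, plane_state,
                                                   crtc_state) {
                if (plane->type == DRM_PLANE_TYPE_PRIMARY)
                        return plane_state;
        }
        return NULL;
}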
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6a42ed618a28..2863d5a65187 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 20e3c65c0999..8c2ce81f01c2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -434,6 +434,12 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
}
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ iosf_mbi_register_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+}
+
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
i915_modparams.enable_rc6 =
@@ -1240,8 +1246,15 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
* bus, which will be busy after this notification, leading to:
* "render: timed out waiting for forcewake ack request."
* errors.
+ *
+ * The notifier is unregistered during intel_runtime_suspend(),
+ * so it's ok to access the HW here without holding a RPM
+ * wake reference -> disable wakeref asserts for the time of
+ * the access.
*/
+ disable_rpm_wakeref_asserts(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ enable_rpm_wakeref_asserts(dev_priv);
break;
case MBI_PMIC_BUS_ACCESS_END:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 582771251b57..9ce079b5dd0d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -134,6 +134,7 @@ bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv
void intel_uncore_fini(struct drm_i915_private *dev_priv);
void intel_uncore_suspend(struct drm_i915_private *dev_priv);
void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 54a73534b37e..d7dd98a6acad 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 9961b44f76ed..19c6fce837df 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as subtest__name to create
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
index 828904b7d468..54fc571b1102 100644
--- a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -271,13 +271,7 @@ struct igt_wakeup {
u32 seqno;
};
-static int wait_atomic(atomic_t *p)
-{
- schedule();
- return 0;
-}
-
-static int wait_atomic_timeout(atomic_t *p)
+static int wait_atomic_timeout(atomic_t *p, unsigned int mode)
{
return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
}
@@ -348,7 +342,7 @@ static void igt_wake_all_sync(atomic_t *ready,
atomic_set(ready, 0);
wake_up_all(wq);
- wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE);
+ wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE);
atomic_set(ready, count);
atomic_set(done, count);
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index 3790fdf44a1a..b26f07b55d86 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -49,9 +49,9 @@ void onstack_fence_fini(struct i915_sw_fence *fence)
i915_sw_fence_fini(fence);
}
-static void timed_fence_wake(unsigned long data)
+static void timed_fence_wake(struct timer_list *t)
{
- struct timed_fence *tf = (struct timed_fence *)data;
+ struct timed_fence *tf = from_timer(tf, t, timer);
i915_sw_fence_commit(&tf->fence);
}
@@ -60,7 +60,7 @@ void timed_fence_init(struct timed_fence *tf, unsigned long expires)
{
onstack_fence_init(&tf->fence);
- setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
+ timer_setup_on_stack(&tf->timer, timed_fence_wake, 0);
if (time_after(expires, jiffies))
mod_timer(&tf->timer, expires);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.h b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
index 4cca4d57f52c..b5dc4e394555 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MOCK_GEM_DEVICE_H__
#define __MOCK_GEM_DEVICE_H__
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/selftests/mock_gem_object.h
index 9fbf67321662..20acdbee7bd0 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_object.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_object.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MOCK_GEM_OBJECT_H__
#define __MOCK_GEM_OBJECT_H__
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index 16ecef33e008..ab6c83caceb7 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 93c7e3f9b4a8..17d2f3a1c562 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -133,9 +133,16 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
plane_disabling = true;
}
- if (plane_disabling) {
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ /*
+ * The flip done wait is only strictly required by imx-drm if a deferred
+ * plane disable is in-flight. As the core requires blocking commits
+ * to wait for the flip it is done here unconditionally. This keeps the
+ * workitem around a bit longer than required for the majority of
+ * non-blocking commits, but we accept that for the sake of simplicity.
+ */
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+ if (plane_disabling) {
for_each_old_plane_in_state(state, plane, old_plane_state, i)
ipu_plane_disable_deferred(plane);
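The comment added to imx_drm_atomic_commit_tail() explains why the flip-done wait now happens unconditionally. For context, a driver-specific atomic_commit_tail built from the generic helpers usually has roughly the following shape; this is a sketch of the helper sequence under that assumption, not the imx-drm implementation, and it omits the deferred plane-disable step that is specific to IPUv3:

#include <drm/drm_atomic_helper.h>

/*
 * Typical custom commit tail: program the hardware, signal hw_done so
 * the next commit may proceed, wait for the flips to actually complete,
 * then clean up the old plane state.
 */
static void my_atomic_commit_tail(struct drm_atomic_state *state)
{
        struct drm_device *dev = state->dev;

        drm_atomic_helper_commit_modeset_disables(dev, state);
        drm_atomic_helper_commit_planes(dev, state, 0);
        drm_atomic_helper_commit_modeset_enables(dev, state);

        drm_atomic_helper_commit_hw_done(state);

        /* Block until the pending page flips have signalled completion. */
        drm_atomic_helper_wait_for_flip_done(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);
}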
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index f6dd64be9cd5..f0b7556c0857 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IMX_DRM_H_
#define _IMX_DRM_H_
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 53e0b24beda6..9a9961802f5c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -115,7 +115,7 @@ static void imx_drm_crtc_reset(struct drm_crtc *crtc)
if (crtc->state) {
if (crtc->state->mode_blob)
- drm_property_unreference_blob(crtc->state->mode_blob);
+ drm_property_blob_put(crtc->state->mode_blob);
state = to_imx_crtc_state(crtc->state);
memset(state, 0, sizeof(*state));
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 596b24ddbf65..e563ea17a827 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IPUV3_PLANE_H__
#define __IPUV3_PLANE_H__
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 8def97d75030..aedecda9728a 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -183,7 +183,7 @@ static int imx_pd_register(struct drm_device *drm,
&imx_pd_connector_helper_funcs);
drm_connector_init(drm, &imxpd->connector,
&imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_DPI);
}
if (imxpd->panel)
diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c
index a78c4b483e8d..eeb155826d27 100644
--- a/drivers/gpu/drm/lib/drm_random.c
+++ b/drivers/gpu/drm/lib/drm_random.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h
index a78644bea7f9..4a3e94dfa0c0 100644
--- a/drivers/gpu/drm/lib/drm_random.h
+++ b/drivers/gpu/drm/lib/drm_random.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __DRM_RANDOM_H__
#define __DRM_RANDOM_H__
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index e37b55a23a65..ce83c396a742 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
mediatek-drm-y := mtk_disp_color.o \
mtk_disp_ovl.o \
mtk_disp_rdma.o \
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index 3ae442a64bd6..c096a9d6bcbc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* MGA Millennium (MGA2064W) functions
* MGA Mystique (MGA1064SG) functions
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index d0b26dd80076..92b3844202d2 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/gpu/drm/msm
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 40f4840ef98e..970c7963ae29 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -82,9 +82,9 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
return NULL;
}
-static void a5xx_preempt_timer(unsigned long data)
+static void a5xx_preempt_timer(struct timer_list *t)
{
- struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+ struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
@@ -300,6 +300,5 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
}
}
- setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
- (unsigned long) a5xx_gpu);
+ timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8d4477818ec2..232201403439 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -353,9 +353,9 @@ static void hangcheck_timer_reset(struct msm_gpu *gpu)
round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
-static void hangcheck_handler(unsigned long data)
+static void hangcheck_handler(struct timer_list *t)
{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
@@ -703,8 +703,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_WORK(&gpu->recover_work, recover_worker);
- setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
- (unsigned long)gpu);
+ timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
spin_lock_init(&gpu->perf_lock);
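Several hunks in this series (the i915 selftest fence, the msm hangcheck timer, and the a5xx preemption timer) convert from setup_timer() with an unsigned long cookie to timer_setup(), where the callback receives the struct timer_list pointer and recovers its container with from_timer(), a container_of() wrapper. A minimal sketch of the converted pattern, with a hypothetical my_gpu structure around the timer API used in those hunks:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_gpu {
        struct timer_list hangcheck_timer;
        /* ... other per-device state ... */
};

/* New-style callback: derive the owning structure from the timer pointer. */
static void my_hangcheck(struct timer_list *t)
{
        struct my_gpu *gpu = from_timer(gpu, t, hangcheck_timer);

        /* inspect gpu, kick recovery work, then re-arm the timer */
        mod_timer(&gpu->hangcheck_timer,
                  round_jiffies_up(jiffies + msecs_to_jiffies(500)));
}

static void my_gpu_init_timer(struct my_gpu *gpu)
{
        timer_setup(&gpu->hangcheck_timer, my_hangcheck, 0);
        mod_timer(&gpu->hangcheck_timer,
                  round_jiffies_up(jiffies + msecs_to_jiffies(500)));
}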
diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
index f26e44ea7389..ebf860bd59af 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <drm/drmP.h>
#include <drm/drm_mode.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index 74a8795c2c2b..f74f1f2b186e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_DISPLAY_H__
#define __NV04_DISPLAY_H__
#include <subdev/bios.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0002.h b/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
index 6d72ed38da32..1a8b45b4631f 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0002.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL0002_H__
#define __NVIF_CL0002_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0046.h b/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
index a6a71f4ad91e..c0d5eba4f8fc 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0046.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL0046_H__
#define __NVIF_CL0046_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl006b.h b/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
index 309ab8a3d9e8..d0e8f35d9e92 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl006b.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL006B_H__
#define __NVIF_CL006B_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 287a7d6fa480..2740278d226b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL0080_H__
#define __NVIF_CL0080_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
index f50866011002..989690fe3cd8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506e.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL506E_H__
#define __NVIF_CL506E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
index 0e5bbb553158..5137b6879abd 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl506f.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL506F_H__
#define __NVIF_CL506F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index 542d95145a67..7cdf53615d7b 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL5070_H__
#define __NVIF_CL5070_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507a.h b/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
index 12e0643b78bd..36e537218596 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507a.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507A_H__
#define __NVIF_CL507A_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507b.h b/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
index 99e9d8c47f60..3e643b752bfc 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507b.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507B_H__
#define __NVIF_CL507B_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507c.h b/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
index 6af70dbdfd9f..fd9e336d0a24 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507C_H__
#define __NVIF_CL507C_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507d.h b/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
index 5ab0c9e4c6a3..e994c6894e3e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507d.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507D_H__
#define __NVIF_CL507D_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl507e.h b/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
index c06209f3cac4..8082d2fde248 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl507e.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL507E_H__
#define __NVIF_CL507E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
index 7f6a8ce5a418..1a875090b251 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL826E_H__
#define __NVIF_CL826E_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
index c4d35522331a..e4e50cfe88f1 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL826F_H__
#define __NVIF_CL826F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
index 169161c1587f..ab0fa8adb756 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL906F_H__
#define __NVIF_CL906F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl9097.h b/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
index 4057676d2981..e4c8de6d00b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl9097.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CL9097_H__
#define __NVIF_CL9097_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index 3e57089526e3..56f5bd81e480 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CLA06F_H__
#define __NVIF_CLA06F_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 56aade45067d..a7c5bf572788 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CLASS_H__
#define __NVIF_CLASS_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h
index b52a8eadce01..f5df8b30c599 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_CLIENT_H__
#define __NVIF_CLIENT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index b579633b80c0..6edb6266857e 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_DEVICE_H__
#define __NVIF_DEVICE_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h
index 0c6f48d8140a..93bccd45a042 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_DRIVER_H__
#define __NVIF_DRIVER_H__
#include <nvif/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/event.h b/drivers/gpu/drm/nouveau/include/nvif/event.h
index 21764499b4be..ec5c924f576a 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/event.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_EVENT_H__
#define __NVIF_EVENT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
index c2c0fc41e017..30ecd31db5df 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0000_H__
#define __NVIF_IF0000_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0001.h b/drivers/gpu/drm/nouveau/include/nvif/if0001.h
index bd5b64125eed..ca9215262215 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0001.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0001.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0001_H__
#define __NVIF_IF0001_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0002.h b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
index c04c91d0b818..d9235c011196 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0002.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0002.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0002_H__
#define __NVIF_IF0002_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0003.h b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
index 0cd03efb80a1..ae30b8261b88 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0003.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0003.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0003_H__
#define __NVIF_IF0003_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0004.h b/drivers/gpu/drm/nouveau/include/nvif/if0004.h
index bd5cd428cfd7..b35547c8ea36 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0004.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0004.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0004_H__
#define __NVIF_IF0004_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0005.h b/drivers/gpu/drm/nouveau/include/nvif/if0005.h
index abfd373bb68b..8ed0ae101715 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0005.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0005.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IF0005_H__
#define __NVIF_IF0005_H__
#define NV10_NVSW_NTFY_UEVENT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
index 1886366457f1..b93d586a2304 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_IOCTL_H__
#define __NVIF_IOCTL_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/notify.h b/drivers/gpu/drm/nouveau/include/nvif/notify.h
index 51e2eb580809..4ed169230657 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/notify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_NOTIFY_H__
#define __NVIF_NOTIFY_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/object.h b/drivers/gpu/drm/nouveau/include/nvif/object.h
index 0b54261bdefe..a2d5244ff2b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/object.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_OBJECT_H__
#define __NVIF_OBJECT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/os.h b/drivers/gpu/drm/nouveau/include/nvif/os.h
index 5efdf80d5abc..fd09b2842972 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/os.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_OS_H__
#define __NOUVEAU_OS_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvif/unpack.h b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
index 751bcf4930a7..7f0d9f6cc1e7 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/unpack.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/unpack.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVIF_UNPACK_H__
#define __NVIF_UNPACK_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 79624f6d0a2b..757fac823a10 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLIENT_H__
#define __NVKM_CLIENT_H__
#define nvkm_client(p) container_of((p), struct nvkm_client, object)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
index c59fd4e2ad5e..966d1822dd80 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/debug.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEBUG_H__
#define __NVKM_DEBUG_H__
#define NV_DBG_FATAL 0
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 5046e1db99ac..560265b15ec2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_H__
#define __NVKM_DEVICE_H__
#include <core/oclass.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
index 7730499bfd95..ebf8473a39fe 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_ENGINE_H__
#define __NVKM_ENGINE_H__
#define nvkm_engine(p) container_of((p), struct nvkm_engine, subdev)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
index 40429a82f792..38acbde2de4f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/enum.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_ENUM_H__
#define __NVKM_ENUM_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
index b98fe2de546a..d3c45e90a1c1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/event.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_EVENT_H__
#define __NVKM_EVENT_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index a626ce378f04..ff0fa38aee72 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIRMWARE_H__
#define __NVKM_FIRMWARE_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
index 51691667b813..10eeaeebc242 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GPUOBJ_H__
#define __NVKM_GPUOBJ_H__
#include <core/memory.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
index 88971eb37afa..e2d39192fa26 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_IOCTL_H__
#define __NVKM_IOCTL_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index 13ebf4da2b96..05f505de0075 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MEMORY_H__
#define __NVKM_MEMORY_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
index 5c1261351138..b0726c39429e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MM_H__
#define __NVKM_MM_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
index 753d08c1767b..4eb82bc563f3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/notify.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NOTIFY_H__
#define __NVKM_NOTIFY_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
index 916a4b76d430..270f893cc154 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OBJECT_H__
#define __NVKM_OBJECT_H__
#include <core/oclass.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
index bd52236cc2f4..d950d5ee188b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/oproxy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OPROXY_H__
#define __NVKM_OPROXY_H__
#define nvkm_oproxy(p) container_of((p), struct nvkm_oproxy, base)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
index 80fdc146e816..a34a79bacbd0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/option.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OPTION_H__
#define __NVKM_OPTION_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
index 1f0108fdd24a..445602d1e8d3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/os.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_OS_H__
#define __NVKM_OS_H__
#include <nvif/os.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
index 78d41be20b8c..4c7f647d2dc9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_PCI_H__
#define __NVKM_DEVICE_PCI_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
index 8a48ca67f60d..d5d789663aca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/ramht.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_RAMHT_H__
#define __NVKM_RAMHT_H__
#include <core/gpuobj.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index a6c21be7537f..63df2290177f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SUBDEV_H__
#define __NVKM_SUBDEV_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 7c7d91cad09a..5c102d0206a7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_TEGRA_H__
#define __NVKM_DEVICE_TEGRA_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
index 904820558fc0..40613983fccb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/bsp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BSP_H__
#define __NVKM_BSP_H__
#include <engine/xtensa.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index b93f4c1a95e5..553245994450 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CE_H__
#define __NVKM_CE_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
index 03fa57a7c30a..72b9da2de7c2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/cipher.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CIPHER_H__
#define __NVKM_CIPHER_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index 05f9c13ab8c3..e83193d3ccab 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_H__
#define __NVKM_DISP_H__
#define nvkm_disp(p) container_of((p), struct nvkm_disp, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
index b672a3b07f55..0f9c1c702ed6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DMA_H__
#define __NVKM_DMA_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index f0024fb5a5af..6427747b6f77 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FALCON_H__
#define __NVKM_FALCON_H__
#define nvkm_falcon(p) container_of((p), struct nvkm_falcon, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index e42d686fbd8b..c17b3a9bf8fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIFO_H__
#define __NVKM_FIFO_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index c7944b19bed8..fb18f105fc43 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GR_H__
#define __NVKM_GR_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
index 257738eff9f6..4ef3d4c5e358 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mpeg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MPEG_H__
#define __NVKM_MPEG_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
index 748ea9b7e559..985fc9490643 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msenc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSENC_H__
#define __NVKM_MSENC_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
index 08516ca82e04..e03f33472486 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/mspdec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSPDEC_H__
#define __NVKM_MSPDEC_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
index 85fd306021ac..760bf17ea63d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msppp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSPPP_H__
#define __NVKM_MSPPP_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
index 99757ed96f76..281866d2501d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/msvld.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSVLD_H__
#define __NVKM_MSVLD_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 00b2b227ff41..fe716859d4a9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NVDEC_H__
#define __NVKM_NVDEC_H__
#define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
index 8a819328059b..cdd68a8bab8b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NVENC_H__
#define __NVKM_NVENC_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
index 240855ad8c8d..6cce8502f9df 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/pm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PM_H__
#define __NVKM_PM_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
index 7317ef4c0207..b206b918c43e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SEC_H__
#define __NVKM_SEC_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
index d3db1b1e75c4..f7d89822b905 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SEC2_H__
#define __NVKM_SEC2_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
index 096e7dbd1e65..83a17c4e11e7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SW_H__
#define __NVKM_SW_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
index 2b0dc4c695c2..9b7d4877cf41 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vic.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_VIC_H__
#define __NVKM_VIC_H__
#include <core/engine.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
index 616ea91e03f8..53bf8aed48fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/vp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_VP_H__
#define __NVKM_VP_H__
#include <engine/xtensa.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
index b1fcc416732f..13c00ce6d556 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/xtensa.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_XTENSA_H__
#define __NVKM_XTENSA_H__
#define nvkm_xtensa(p) container_of((p), struct nvkm_xtensa, engine)
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index ffa963939e15..f6bd94c7e0f7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BAR_H__
#define __NVKM_BAR_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index a72f3290528a..979e9a144e7b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BIOS_H__
#define __NVKM_BIOS_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
index cf202c793a1d..703a5b524b96 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_M0203_H__
#define __NVBIOS_M0203_H__
struct nvbios_M0203T {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
index d34608ff241e..b4e14e45a0e8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0205.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_M0205_H__
#define __NVBIOS_M0205_H__
struct nvbios_M0205T {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
index c7ff8d9526e7..c09376894d12 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0209.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_M0209_H__
#define __NVBIOS_M0209_H__
u32 nvbios_M0209Te(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
index 1c1c52eac97d..901d94ef11b8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/P0260.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_P0260_H__
#define __NVBIOS_P0260_H__
u32 nvbios_P0260Te(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
index 6711732b7cb1..d068586f3263 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bit.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_BIT_H__
#define __NVBIOS_BIT_H__
struct bit_entry {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
index 3f0c7c414026..9a3f9483ee75 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/bmp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_BMP_H__
#define __NVBIOS_BMP_H__
static inline u16
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
index 2ff64a20c0ec..a1c48c6b223b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/boost.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_BOOST_H__
#define __NVBIOS_BOOST_H__
u32 nvbios_boostTe(struct nvkm_bios *, u8 *, u8 *, u8 *, u8 *, u8 *, u8 *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index deb477282dde..ed9e0a6a0011 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_CONN_H__
#define __NVBIOS_CONN_H__
enum dcb_connector_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
index 76fe7d50a1ce..49343d276e11 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/cstep.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_CSTEP_H__
#define __NVBIOS_CSTEP_H__
u32 nvbios_cstepTe(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
index 903d117603d8..63ddc6ed897a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dcb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_DCB_H__
#define __NVBIOS_DCB_H__
enum dcb_output_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
index c5a6ebd5a478..423d92de0aae 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_DISP_H__
#define __NVBIOS_DISP_H__
u16 nvbios_disp_table(struct nvkm_bios *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
index b4d39df70d4e..df34b41838d6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/dp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_DP_H__
#define __NVBIOS_DP_H__
struct nvbios_dpout {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
index bb49bd5f879e..f93e4f951f2f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/extdev.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_EXTDEV_H__
#define __NVBIOS_EXTDEV_H__
enum nvbios_extdev_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
index a7513e8406a3..09c1d3b9d009 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/fan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_FAN_H__
#define __NVBIOS_FAN_H__
#include <subdev/bios/therm.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
index b7a54e605469..b71a3555c64e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/gpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_GPIO_H__
#define __NVBIOS_GPIO_H__
enum dcb_gpio_func_name {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
index 85c529ecf9b1..ae1f7483dd28 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/i2c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_I2C_H__
#define __NVBIOS_I2C_H__
enum dcb_i2c_type {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
index e933d3eede70..e220a1ac1387 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/iccsense.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_ICCSENSE_H__
#define __NVBIOS_ICCSENSE_H__
struct pwr_rail_resistor_t {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
index e15d63b9a5eb..893288b060de 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/image.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_IMAGE_H__
#define __NVBIOS_IMAGE_H__
struct nvbios_image {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
index 06ab48052128..744b1868e789 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/init.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_INIT_H__
#define __NVBIOS_INIT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
index 4e31b64c5edf..327bf9c4b703 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/mxm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_MXM_H__
#define __NVBIOS_MXM_H__
u16 mxm_table(struct nvkm_bios *, u8 *ver, u8 *hdr);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
index 64a59549b7ea..ee5419b7b45b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/npde.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_NPDE_H__
#define __NVBIOS_NPDE_H__
struct nvbios_npdeT {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
index e85931541f4f..1dffe8d6cc81 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pcir.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_PCIR_H__
#define __NVBIOS_PCIR_H__
struct nvbios_pcirT {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
index 478b1c0d2089..0ee84ea6d737 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/perf.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_PERF_H__
#define __NVBIOS_PERF_H__
u32 nvbios_perf_table(struct nvkm_bios *, u8 *ver, u8 *hdr,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
index 5a69978d1e3b..ab964e085f02 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_PLL_H__
#define __NVBIOS_PLL_H__
/*XXX: kill me */
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
index 3a643df6de04..fb41ecab8f8c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/pmu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_PMU_H__
#define __NVBIOS_PMU_H__
struct nvbios_pmuT {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
index f5f4a14c4030..ff12d810dce3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_POWER_BUDGET_H__
#define __NVBIOS_POWER_BUDGET_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
index dca6c060a24f..2b87a38adb7a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/ramcfg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_RAMCFG_H__
#define __NVBIOS_RAMCFG_H__
struct nvbios_ramcfg {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
index 8d8ee13721ec..471eef434b51 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/rammap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_RAMMAP_H__
#define __NVBIOS_RAMMAP_H__
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
index dd3ba960e75d..46a3b15e10ec 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/therm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_THERM_H__
#define __NVBIOS_THERM_H__
struct nvbios_therm_threshold {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
index 38188d4c9ab5..40ceabf37827 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/timing.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_TIMING_H__
#define __NVBIOS_TIMING_H__
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
index bea31cdd1dd1..67419bad584c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vmap.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_VMAP_H__
#define __NVBIOS_VMAP_H__
struct nvbios_vmap {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
index f0baa2c7de09..6b36d5ecb8f9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/volt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_VOLT_H__
#define __NVBIOS_VOLT_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
index 87f804fc3a88..36f3028d58ef 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/vpstate.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_VPSTATE_H__
#define __NVBIOS_VPSTATE_H__
struct nvbios_vpstate_header {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
index 0c0fe234ff12..d1bb5d044585 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/xpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVBIOS_XPIO_H__
#define __NVBIOS_XPIO_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
index 33a057c334f2..7695f7f77a06 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BUS_H__
#define __NVKM_BUS_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index e5275f742977..15db75ef0189 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLK_H__
#define __NVKM_CLK_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 709d786f1808..40558064d589 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVINIT_H__
#define __NVKM_DEVINIT_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index a00fd2e59215..adb78f7d083a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_H__
#define __NVKM_FB_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
index ae201e388487..092193b7f98e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fuse.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FUSE_H__
#define __NVKM_FUSE_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
index 9b9c6d2f90b6..ee54899076e3 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gpio.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GPIO_H__
#define __NVKM_GPIO_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index ce23cc6c672e..eef54e9b5d77 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_H__
#define __NVKM_I2C_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index 6e2b70bd2f41..919653c1d101 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_IBUS_H__
#define __NVKM_IBUS_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
index b7a9b041e130..be9475cd94fd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_ICCSENSE_H__
#define __NVKM_ICCSENSE_H__
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index 8111c0c3c5ec..36ed520ed2d0 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_INSTMEM_H__
#define __NVKM_INSTMEM_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index 4a224fd22e48..95b611554d53 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_LTC_H__
#define __NVKM_LTC_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 58f10890c3b6..61c93c86e2e2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MC_H__
#define __NVKM_MC_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 975c42f620a0..0760b93e9d1f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
index ed0250139dae..0fd6d6f8eada 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mxm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MXM_H__
#define __NVKM_MXM_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index ac2a695963c1..23803cc859fd 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PCI_H__
#define __NVKM_PCI_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index e7f04732a425..4bc9384046c6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PMU_H__
#define __NVKM_PMU_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index 9841f076da2e..b1ac47eb786e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_THERM_H__
#define __NVKM_THERM_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index ff0709652f80..e9b0746826ca 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_TIMER_H__
#define __NVKM_TIMER_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index d23209b62c25..f7d3eb647e2e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_TOP_H__
#define __NVKM_TOP_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
index ce5636fe2a66..312933ad7c2b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/vga.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_VGA_H__
#define __NOUVEAU_VGA_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index 08ef9983c643..8a0f85f5fc1a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_VOLT_H__
#define __NVKM_VOLT_H__
#include <core/subdev.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index 327747680324..36fde1ff3ad5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_ABI16_H__
#define __NOUVEAU_ABI16_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7459ef9943ec..5ffcb6683776 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 2f03653aff86..b86294fc99e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_ACPI_H__
#define __NOUVEAU_ACPI_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 23002bdd94a8..7b5cc5c73d20 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index f29d3a72c48c..14607c16a2bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_CHAN_H__
#define __NOUVEAU_CHAN_H__
#include <nvif/object.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index b799f8dfb2b2..1d01a82d4b6f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_DEBUGFS_H__
#define __NOUVEAU_DEBUGFS_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1411bf05b89d..270ba56f2756 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_DISPLAY_H__
#define __NOUVEAU_DISPLAY_H__
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e86b8220a4bb..3331e82ae9e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_DRV_H__
#define __NOUVEAU_DRV_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index c36031aa013e..5bd8d30d1657 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index d39f845dda87..fe39998f65cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_GEM_H__
#define __NOUVEAU_GEM_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
index 3b9f2e5463a7..380ede26806c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioctl.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_IOCTL_H__
#define __NOUVEAU_IOCTL_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 7226f1f60901..b5b5fe40779d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define NV04_PFB_BOOT_0 0x00100000
# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 941bf33bd249..11f6ca89769b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/pagemap.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index 25b0de413352..96082b696420 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_TTM_H__
#define __NOUVEAU_TTM_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.h b/drivers/gpu/drm/nouveau/nouveau_usif.h
index c037e3ae8c70..c68f1c65af3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.h
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_USIF_H__
#define __NOUVEAU_USIF_H__
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 48393a4f6331..52e52a360fb1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.h b/drivers/gpu/drm/nouveau/nouveau_vga.h
index ea3ad6974c65..6a3000c88142 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NOUVEAU_VGA_H__
#define __NOUVEAU_VGA_H__
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
index b7a508585304..7616c66803f8 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.h
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV10_FENCE_H_
#define __NV10_FENCE_H_
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index fdfeae12ef62..65336948e807 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4099,7 +4099,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
- struct drm_plane_state *old_plane_state;
+ struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
struct drm_crtc *crtc;
bool active = false;
@@ -4129,8 +4129,8 @@ nv50_disp_atomic_commit(struct drm_device *dev,
if (ret)
goto err_cleanup;

- for_each_old_plane_in_state(state, plane, old_plane_state, i) {
- struct nv50_wndw_atom *asyw = nv50_wndw_atom(old_plane_state);
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);

if (asyw->set.image) {
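The nv50_disp_atomic_commit() hunk above moves the post-check plane walk from each plane's old state to the state being committed, which is the state the driver-specific nv50_wndw_atom() flags (such as set.image) describe. A minimal sketch of the two DRM iterator macros, assuming only <drm/drm_atomic.h>; the function and variable names below are illustrative and not taken from the driver:

#include <drm/drm_atomic.h>

/*
 * Illustrative sketch: both macros visit every plane tracked in the atomic
 * state, but hand back different per-plane drm_plane_state objects.
 */
static int sketch_count_enabled_planes(struct drm_atomic_state *state)
{
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct drm_plane *plane;
        int i, before = 0, after = 0;

        /* State each plane had before this commit. */
        for_each_old_plane_in_state(state, plane, old_plane_state, i)
                if (old_plane_state->fb)
                        before++;

        /* State this commit is applying -- what the hunk above inspects. */
        for_each_new_plane_in_state(state, plane, new_plane_state, i)
                if (new_plane_state->fb)
                        after++;

        return after - before;
}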
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
index d9ca9636a3e3..da130f5058e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gf100.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf100_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
index f0a1cf31c7ca..0b92eb32598d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/fuc/gt215.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gt215_ce_data[] = {
/* 0x0000: ctx_object */
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index 2dce405976ad..0e3d08f11b0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CE_PRIV_H__
#define __NVKM_CE_PRIV_H__
#include <engine/ce.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
index 1bbe76e0740a..6a62021e9861 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_ACPI_H__
#define __NVKM_DEVICE_ACPI_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
index 2c3c3ee3c494..ebcc5c52fbd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_CTRL_H__
#define __NVKM_DEVICE_CTRL_H__
#define nvkm_control(p) container_of((p), struct nvkm_control, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 6c16f3835f44..08d0bf605722 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVICE_PRIV_H__
#define __NVKM_DEVICE_PRIV_H__
#include <core/device.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index 9bb4ad5b0e57..40681db91a02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DISP_CHAN_H__
#define __NV50_DISP_CHAN_H__
#define nv50_disp_chan(p) container_of((p), struct nv50_disp_chan, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index de962b7b026d..090e869ae612 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_CONN_H__
#define __NVKM_DISP_CONN_H__
#include <engine/disp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index ea4a0d062e31..f9b98211da6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DISP_DMAC_H__
#define __NV50_DISP_DMAC_H__
#define nv50_disp_dmac(p) container_of((p), struct nv50_disp_dmac, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
index 59173c290525..495f665a0ee6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_DP_H__
#define __NVKM_DISP_DP_H__
#define nvkm_dp(p) container_of((p), struct nvkm_dp, outp)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
index e82c68f18444..d131cca999dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "hdmi.h"
void pack_hdmi_infoframe(struct packed_hdmi_infoframe *packed_frame,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h
index 528f5621a496..45094c6e1425 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmi.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_HDMI_H__
#define __NVKM_DISP_HDMI_H__
#include "ior.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
index b04c49d2eeeb..57030b3a4a75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/head.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_HEAD_H__
#define __NVKM_DISP_HEAD_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index c9e0a8f7b5d5..4548c031b937 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_IOR_H__
#define __NVKM_DISP_IOR_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 6ea19466f436..eb0b8acb1c5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DISP_H__
#define __NV50_DISP_H__
#define nv50_disp(p) container_of((p), struct nv50_disp, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 146d101d4891..ea84d7d5741a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_OUTP_H__
#define __NVKM_DISP_OUTP_H__
#include <engine/disp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
index 5772f0094129..6c9bfff6d043 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DISP_PRIV_H__
#define __NVKM_DISP_PRIV_H__
#include <engine/disp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index b147cf5b3518..4818fa69ae6c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DISP_ROOT_H__
#define __NV50_DISP_ROOT_H__
#define nv50_disp_root(p) container_of((p), struct nv50_disp_root, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
index deb37ee55c0b..4307cbecd5c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DMA_PRIV_H__
#define __NVKM_DMA_PRIV_H__
#define nvkm_dma(p) container_of((p), struct nvkm_dma, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
index 69a7f1034024..4bbac8a21c71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/dma/user.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DMA_USER_H__
#define __NVKM_DMA_USER_H__
#define nvkm_dmaobj(p) container_of((p), struct nvkm_dmaobj, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index d8019bdacd61..3ffef236189e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIFO_CHAN_H__
#define __NVKM_FIFO_CHAN_H__
#define nvkm_fifo_chan(p) container_of((p), struct nvkm_fifo_chan, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
index fc1142af02cf..b653664e081b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GF100_FIFO_CHAN_H__
#define __GF100_FIFO_CHAN_H__
#define gf100_fifo_chan(p) container_of((p), struct gf100_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 5beb5c628473..1208e3d9dbe2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GK104_FIFO_CHAN_H__
#define __GK104_FIFO_CHAN_H__
#define gk104_fifo_chan(p) container_of((p), struct gk104_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
index 3361a1fd0343..15b06bdf5067 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_FIFO_CHAN_H__
#define __NV04_FIFO_CHAN_H__
#define nv04_fifo_chan(p) container_of((p), struct nv04_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
index d853056e040b..2e3c4005b874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_FIFO_CHAN_H__
#define __NV50_FIFO_CHAN_H__
#define nv50_fifo_chan(p) container_of((p), struct nv50_fifo_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
index b81a2ad48aa4..68f97ba03df6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GF100_FIFO_H__
#define __GF100_FIFO_H__
#define gf100_fifo(p) container_of((p), struct gf100_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 466f1051f91a..1579785cf941 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GK104_FIFO_H__
#define __GK104_FIFO_H__
#define gk104_fifo(p) container_of((p), struct gk104_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
index 03f60004bf7c..1d70542553cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_FIFO_H__
#define __NV04_FIFO_H__
#define nv04_fifo(p) container_of((p), struct nv04_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
index 8ab53948cbb4..a3994e8db462 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_FIFO_H__
#define __NV50_FIFO_H__
#define nv50_fifo(p) container_of((p), struct nv50_fifo, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index f889b13b5e41..ae76b1aaccd4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIFO_PRIV_H__
#define __NVKM_FIFO_PRIV_H__
#define nvkm_fifo(p) container_of((p), struct nvkm_fifo, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
index 92d56221197b..49892a5e7201 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/regsnv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_FIFO_REGS_H__
#define __NV04_FIFO_REGS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 4731e56fbb11..5199e5aa0cb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GRCTX_NVC0_H__
#define __NVKM_GRCTX_NVC0_H__
#include "gf100.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
index 50e808e9f926..4d67d90261b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxnv40.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GRCTX_H__
#define __NVKM_GRCTX_H__
#include <core/gpuobj.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
index 12a703fe355d..0323acb739c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf100.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf100_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x00000064,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
index ffbfc51200f1..1bb265917915 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgf117.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf117_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
index 357f662de571..cf8343a693ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk104.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk104_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
index 4ffc8212a85c..f4bfa109ed27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk110.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk110_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
index 09196206c9bc..59a3e1b2927f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgk208.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk208_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
index 6d7d004363d9..8daa0516704a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/gpcgm107.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gm107_grgpc_data[] = {
/* 0x0000: gpc_mmio_list_head */
0x0000006c,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
index 7538404b8b13..cbf2351f8da8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf100.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf100_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
index ce000a47ec6d..70830036ffee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgf117.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf117_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
index 1f26cb6a233c..7f2fd84d0c3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk104.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk104_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
index 70436d93efe3..560063789de8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk110.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk110_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
index e0933a07426a..71e85784b615 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgk208.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk208_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
index 9b432823bcbe..d85eac6d1c61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/hubgm107.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gm107_grhub_data[] = {
/* 0x0000: hub_mmio_list_head */
0x00000300,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
index 1718ae4e8224..f87693809c9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/fuc/os.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GRAPH_OS_H__
#define __NVKM_GRAPH_OS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
index d7c3d86cc99d..d5a376c4dd0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV10_GR_H__
#define __NV10_GR_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
index d6840dc81a29..111c8bb4497b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
index d0cb2b8846ec..979dc5f7b32e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV20_GR_H__
#define __NV20_GR_H__
#define nv20_gr(p) container_of((p), struct nv20_gr, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
index 6c4a00819b4b..e59a28a26d65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv25.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
index 3cad26dbc2b1..e113b2d4c811 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv2a.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index b4e3c50badc7..4aac2c224874 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index e7ed04b935cd..301556503e93 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
index 5e8abacbacc6..5d6926611a5b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv35.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include "nv20.h"
#include "regs.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
index bee8ef2d5697..731400937edd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV40_GR_H__
#define __NV40_GR_H__
#define nv40_gr(p) container_of((p), struct nv40_gr, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
index 1ab6ea436b70..5b9d99bee207 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_GR_H__
#define __NV50_GR_H__
#define nv50_gr(p) container_of((p), struct nv50_gr, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
index 2a52d9f026ec..66359c23cbce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GR_PRIV_H__
#define __NVKM_GR_PRIV_H__
#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
index 90a9873ce522..dc4f936675ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/regs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GR_REGS_H__
#define __NVKM_GR_REGS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
index f0d35beb58df..b31fad8bdaad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV31_MPEG_H__
#define __NV31_MPEG_H__
#define nv31_mpeg(p) container_of((p), struct nv31_mpeg, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
index d5753103ff63..26f9d14151e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MPEG_PRIV_H__
#define __NVKM_MPEG_PRIV_H__
#include <engine/mpeg.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
index d518af4bc9de..db305072a82f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mspdec/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSPDEC_PRIV_H__
#define __NVKM_MSPDEC_PRIV_H__
#include <engine/mspdec.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
index 37a91f9d9181..7708e52c9043 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msppp/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSPPP_PRIV_H__
#define __NVKM_MSPPP_PRIV_H__
#include <engine/msppp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
index 9dc1da67d929..66c36049abca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/msvld/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MSVLD_PRIV_H__
#define __NVKM_MSVLD_PRIV_H__
#include <engine/msvld.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
index 353b94f51205..6c300739f621 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NVDEC_PRIV_H__
#define __NVKM_NVDEC_PRIV_H__
#include <engine/nvdec.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
index 56d0344853ea..c74fd4557d41 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/gf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PM_NVC0_H__
#define __NVKM_PM_NVC0_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
index da481abe8f7a..3f37b713936c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/nv40.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PM_NV40_H__
#define __NVKM_PM_NV40_H__
#define nv40_pm(p) container_of((p), struct nv40_pm, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
index 4ff0475e776c..9fad3611a843 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PM_PRIV_H__
#define __NVKM_PM_PRIV_H__
#define nvkm_pm(p) container_of((p), struct nvkm_pm, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
index 4b57f8814560..6278a0c5fe83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/fuc/g98.fuc0s.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t g98_sec_data[] = {
/* 0x0000: ctx_dma */
/* 0x0000: ctx_dma_query */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
index 7ecc9d4724dc..2f97c806a79d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SEC2_PRIV_H__
#define __NVKM_SEC2_PRIV_H__
#include <engine/sec2.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
index b5be49f0ac56..d42862fc43fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SW_CHAN_H__
#define __NVKM_SW_CHAN_H__
#define nvkm_sw_chan(p) container_of((p), struct nvkm_sw_chan, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
index 25cdfdef2d46..459afd30a484 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SW_NV50_H__
#define __NVKM_SW_NV50_H__
#define nv50_sw_chan(p) container_of((p), struct nv50_sw_chan, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
index bcfff62131fe..d7034950ba87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/nvsw.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_NVSW_H__
#define __NVKM_NVSW_H__
#define nvkm_nvsw(p) container_of((p), struct nvkm_nvsw, object)
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
index 0ef1318dc2fd..4aca1791abc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sw/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_SW_PRIV_H__
#define __NVKM_SW_PRIV_H__
#define nvkm_sw(p) container_of((p), struct nvkm_sw, engine)
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
index 97b56f759d0b..d515ad994199 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FALCON_PRIV_H__
#define __NVKM_FALCON_PRIV_H__
#include <engine/falcon.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
index e4da39139e95..4f2b66e8d795 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __GF100_BAR_H__
#define __GF100_BAR_H__
#define gf100_bar(p) container_of((p), struct gf100_bar, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
index 140b76f588b6..2fe833f6d9f7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_BAR_H__
#define __NV50_BAR_H__
#define nv50_bar(p) container_of((p), struct nv50_bar, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
index 14398e2dbdf9..01ba5b26666e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BAR_PRIV_H__
#define __NVKM_BAR_PRIV_H__
#define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
index 7d1d3c6b4b72..33435ca16311 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BIOS_PRIV_H__
#define __NVKM_BIOS_PRIV_H__
#define nvkm_bios(p) container_of((p), struct nvkm_bios, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
index 54ec3b131dfd..17ac1812a928 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/hwsq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BUS_HWSQ_H__
#define __NVKM_BUS_HWSQ_H__
#include <subdev/bus.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
index a130f2c642d5..ef01e569352d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bus/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_BUS_PRIV_H__
#define __NVKM_BUS_PRIV_H__
#define nvkm_bus(p) container_of((p), struct nvkm_bus, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
index 8865b59fe575..1ea886a4301f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLK_NVA3_H__
#define __NVKM_CLK_NVA3_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
index d3c7fb6efa16..f134d979d884 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_CLK_H__
#define __NV50_CLK_H__
#define nv50_clk(p) container_of((p), struct nv50_clk, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
index 44020a30dee8..9a39f1fd2976 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/pll.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PLL_H__
#define __NVKM_PLL_H__
#include <core/os.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
index 51eafc00c8b1..b656177923fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLK_PRIV_H__
#define __NVKM_CLK_PRIV_H__
#define nvkm_clk(p) container_of((p), struct nvkm_clk, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
index d717e8b8f679..d0715fe84328 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/seq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_CLK_SEQ_H__
#define __NVKM_CLK_SEQ_H__
#include <subdev/bus/hwsq.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
index 4a87c8c2bce8..b18e49847eee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV04_DEVINIT_H__
#define __NV04_DEVINIT_H__
#define nv04_devinit(p) container_of((p), struct nv04_devinit, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 25d2ae3af1c6..315ebaff1165 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV50_DEVINIT_H__
#define __NV50_DEVINIT_H__
#define nv50_devinit(p) container_of((p), struct nv50_devinit, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index e1f6ae58f1d3..5b3097a586dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_DEVINIT_PRIV_H__
#define __NVKM_DEVINIT_PRIV_H__
#define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index e3cf0515bb70..ab261310753a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_RAM_NVC0_H__
#define __NVKM_RAM_NVC0_H__
#define gf100_fb(p) container_of((p), struct gf100_fb, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
index 13231d4b00d9..dacc696387b6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_NV50_H__
#define __NVKM_FB_NV50_H__
#define nv50_fb(p) container_of((p), struct nv50_fb, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index e05d95240e85..9351188d5d76 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_PRIV_H__
#define __NVKM_FB_PRIV_H__
#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index 70fd59dcd06d..330132e95b6f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_RAM_PRIV_H__
#define __NVKM_FB_RAM_PRIV_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
index 9ef9d6aa3721..a65fa5586af8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramfuc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FBRAM_FUC_H__
#define __NVKM_FBRAM_FUC_H__
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
index ec5dcbfcaea8..11f6bb2936b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv40.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NV40_FB_RAM_H__
#define __NV40_FB_RAM_H__
#define nv40_ram(p) container_of((p), struct nv40_ram, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
index 8df7306d5729..d8f5053e8e2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramseq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FBRAM_SEQ_H__
#define __NVKM_FBRAM_SEQ_H__
#include <subdev/bus/hwsq.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
index 1f865f61504e..ad26fcbe9e06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/regsnv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FB_REGS_04_H__
#define __NVKM_FB_REGS_04_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
index b0390b540ef5..3a5595a9e457 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fuse/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FUSE_PRIV_H__
#define __NVKM_FUSE_PRIV_H__
#define nvkm_fuse(p) container_of((p), struct nvkm_fuse, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
index 371bcdbbe0d6..9759f13447bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_GPIO_PRIV_H__
#define __NVKM_GPIO_PRIV_H__
#define nvkm_gpio(p) container_of((p), struct nvkm_gpio, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 9587ab456d9e..7d56c4ba693c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_AUX_H__
#define __NVKM_I2C_AUX_H__
#include "pad.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
index e1be14c23e54..bea0dd33961e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_BUS_H__
#define __NVKM_I2C_BUS_H__
#include "pad.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
index 316c4536f29a..33f0c809e583 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_PAD_H__
#define __NVKM_I2C_PAD_H__
#include <subdev/i2c.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
index bf655a66ef40..f476a69b6cb7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_I2C_PRIV_H__
#define __NVKM_I2C_PRIV_H__
#define nvkm_i2c(p) container_of((p), struct nvkm_i2c, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
index 01caf798cf31..504a6d37ec50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_IBUS_PRIV_H__
#define __NVKM_IBUS_PRIV_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
index e90e0f6ed008..bd599b8252ca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_ICCSENSE_PRIV_H__
#define __NVKM_ICCSENSE_PRIV_H__
#define nvkm_iccsense(p) container_of((p), struct nvkm_iccsense, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index 44651ca42d52..b9e4751b9921 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_INSTMEM_PRIV_H__
#define __NVKM_INSTMEM_PRIV_H__
#define nvkm_instmem(p) container_of((p), struct nvkm_instmem, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 8b95f96e3ffa..e71cc25cc775 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_LTC_PRIV_H__
#define __NVKM_LTC_PRIV_H__
#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 3be4126441e4..8869d79c2b59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MC_PRIV_H__
#define __NVKM_MC_PRIV_H__
#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
index d024d8055fcb..948a48c21be4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MMU_PRIV_H__
#define __NVKM_MMU_PRIV_H__
#define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
index 333e0c01545a..011a67fe4a8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/mxms.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVMXM_MXMS_H__
#define __NVMXM_MXMS_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
index 7d970157aed1..6767c2279e7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_MXM_PRIV_H__
#define __NVKM_MXM_PRIV_H__
#define nvkm_mxm(p) container_of((p), struct nvkm_mxm, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
index df2dd08363ad..edb7f00f0de5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/agp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#include "priv.h"
#if defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))
#ifndef __NVKM_PCI_AGP_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
index 86921ec962d6..c17f6063c9ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PCI_PRIV_H__
#define __NVKM_PCI_PRIV_H__
#define nvkm_pci(p) container_of((p), struct nvkm_pci, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 0bcf0b307a61..53d01fb00a8b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf100_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
index fe8905666c67..e1e981966c2d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf119.fuc4.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gf119_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index 9cf4e6fc724e..c4edbc79e41a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gk208_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index 5d692425b190..6a2572e8945a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
static uint32_t gt215_pmu_data[] = {
/* 0x0000: proc_kern */
0x52544e49,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
index c8b06cb77e72..30d9480b9be5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/os.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PWR_OS_H__
#define __NVKM_PWR_OS_H__
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
index e6f74168238c..11b28b086a06 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#ifndef __NVKM_PMU_MEMX_H__
#define __NVKM_PMU_MEMX_H__
#include "priv.h"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
index a4c48a10cd47..e9c6f9725afe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_PMU_PRIV_H__
#define __NVKM_PMU_PRIV_H__
#define nvkm_pmu(p) container_of((p), struct nvkm_pmu, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
index f820ca2aeda4..3b8878486faa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_TIMER_PRIV_H__
#define __NVKM_TIMER_PRIV_H__
#define nvkm_timer(p) container_of((p), struct nvkm_timer, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
index 10bef85b485e..23d07f5f44d9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/regsnv04.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define NV04_PTIMER_INTR_0 0x009100
#define NV04_PTIMER_INTR_EN_0 0x009140
#define NV04_PTIMER_NUMERATOR 0x009200
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
index adb3ed03d937..4f49b0acaa0e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_TOP_PRIV_H__
#define __NVKM_TOP_PRIV_H__
#define nvkm_top(p) container_of((p), struct nvkm_top, subdev)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
index 354bafe4b4e2..1a8ad560321b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/priv.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_VOLT_PRIV_H__
#define __NVKM_VOLT_PRIV_H__
#define nvkm_volt(p) container_of((p), struct nvkm_volt, subdev)
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index b391be7ecb6c..f115253115c5 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI)
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index c226da145fb3..a349cb61961e 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
config DRM_OMAP_PANEL_DPI
tristate "Generic DPI panel"
+ depends on BACKLIGHT_CLASS_DEVICE
help
Driver for generic DPI panels.
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 46baafb1a83e..d99659e1381b 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 3c5644c3fc38..904101c5e79d 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index daf286fc8a40..ca1e3b489540 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -566,8 +566,8 @@ static int dpi_verify_pll(struct dss_pll *pll)
}
static const struct soc_device_attribute dpi_soc_devices[] = {
- { .family = "OMAP3[456]*" },
- { .family = "[AD]M37*" },
+ { .machine = "OMAP3[456]*" },
+ { .machine = "[AD]M37*" },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index b56a05730314..c2cf6d98e577 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4095,7 +4095,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
}
#ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
{
DSSERR("TE not received for 250ms!\n");
}
@@ -5449,9 +5449,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi_framedone_timeout_work_callback);
#ifdef DSI_CATCH_MISSING_TE
- init_timer(&dsi->te_timer);
- dsi->te_timer.function = dsi_te_timeout;
- dsi->te_timer.data = 0;
+ timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index d86873f2abe6..e626eddf24d5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -352,7 +352,7 @@ int hdmi4_cec_init(struct platform_device *pdev, struct hdmi_core_data *core,
{
const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
- unsigned int ret;
+ int ret;
core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
"omap4", caps, CEC_MAX_LOG_ADDRS);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index 62e451162d96..b06f9956e733 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -886,25 +886,36 @@ struct hdmi4_features {
bool audio_use_mclk;
};
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
.cts_swmode = false,
.audio_use_mclk = false,
};
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
.cts_swmode = true,
.audio_use_mclk = false,
};
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
.cts_swmode = true,
.audio_use_mclk = true,
};
static const struct soc_device_attribute hdmi4_soc_devices[] = {
- { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
- { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
- { .family = "OMAP4", .data = &hdmi4_es3_features },
+ {
+ .machine = "OMAP4430",
+ .revision = "ES1.?",
+ .data = &hdmi4430_es1_features,
+ },
+ {
+ .machine = "OMAP4430",
+ .revision = "ES2.?",
+ .data = &hdmi4430_es2_features,
+ },
+ {
+ .family = "OMAP4",
+ .data = &hdmi4_features,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
index 4dfb67fe5f6d..3ecde23ac604 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define DSS_SUBSYS_NAME "HDMI"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 1dd3dafc59af..c60a85e82c6d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
match = of_match_node(dmm_of_match, dev->dev.of_node);
if (!match) {
dev_err(&dev->dev, "failed to find matching device node\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
omap_dmm->plat_data = match->data;
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 77ede3467324..2c4e1a93e05f 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index fce1453a93e1..9c5e8dba8ac6 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
pl111_drm-y += pl111_display.o \
pl111_versatile.o \
pl111_drv.o
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 09143b840482..2de40d276116 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -147,6 +147,10 @@ extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file
extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
extern void r128_freelist_reset(struct drm_device *dev);
extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
index 663f38c63ba6..6589f9e0310e 100644
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -63,39 +63,36 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_init32_t init32;
- drm_r128_init_t __user *init;
+ drm_r128_init_t init;
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
return -EFAULT;
- init = compat_alloc_user_space(sizeof(*init));
- if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
- || __put_user(init32.func, &init->func)
- || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
- || __put_user(init32.is_pci, &init->is_pci)
- || __put_user(init32.cce_mode, &init->cce_mode)
- || __put_user(init32.cce_secure, &init->cce_secure)
- || __put_user(init32.ring_size, &init->ring_size)
- || __put_user(init32.usec_timeout, &init->usec_timeout)
- || __put_user(init32.fb_bpp, &init->fb_bpp)
- || __put_user(init32.front_offset, &init->front_offset)
- || __put_user(init32.front_pitch, &init->front_pitch)
- || __put_user(init32.back_offset, &init->back_offset)
- || __put_user(init32.back_pitch, &init->back_pitch)
- || __put_user(init32.depth_bpp, &init->depth_bpp)
- || __put_user(init32.depth_offset, &init->depth_offset)
- || __put_user(init32.depth_pitch, &init->depth_pitch)
- || __put_user(init32.span_offset, &init->span_offset)
- || __put_user(init32.fb_offset, &init->fb_offset)
- || __put_user(init32.mmio_offset, &init->mmio_offset)
- || __put_user(init32.ring_offset, &init->ring_offset)
- || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
- || __put_user(init32.buffers_offset, &init->buffers_offset)
- || __put_user(init32.agp_textures_offset,
- &init->agp_textures_offset))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
+ init.func = init32.func;
+ init.sarea_priv_offset = init32.sarea_priv_offset;
+ init.is_pci = init32.is_pci;
+ init.cce_mode = init32.cce_mode;
+ init.cce_secure = init32.cce_secure;
+ init.ring_size = init32.ring_size;
+ init.usec_timeout = init32.usec_timeout;
+ init.fb_bpp = init32.fb_bpp;
+ init.front_offset = init32.front_offset;
+ init.front_pitch = init32.front_pitch;
+ init.back_offset = init32.back_offset;
+ init.back_pitch = init32.back_pitch;
+ init.depth_bpp = init32.depth_bpp;
+ init.depth_offset = init32.depth_offset;
+ init.depth_pitch = init32.depth_pitch;
+ init.span_offset = init32.span_offset;
+ init.fb_offset = init32.fb_offset;
+ init.mmio_offset = init32.mmio_offset;
+ init.ring_offset = init32.ring_offset;
+ init.ring_rptr_offset = init32.ring_rptr_offset;
+ init.buffers_offset = init32.buffers_offset;
+ init.agp_textures_offset = init32.agp_textures_offset;
+
+ return drm_ioctl_kernel(file, r128_cce_init, &init,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
typedef struct drm_r128_depth32 {
@@ -111,25 +108,19 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_depth32_t depth32;
- drm_r128_depth_t __user *depth;
+ drm_r128_depth_t depth;
if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
return -EFAULT;
- depth = compat_alloc_user_space(sizeof(*depth));
- if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth))
- || __put_user(depth32.func, &depth->func)
- || __put_user(depth32.n, &depth->n)
- || __put_user((int __user *)(unsigned long)depth32.x, &depth->x)
- || __put_user((int __user *)(unsigned long)depth32.y, &depth->y)
- || __put_user((unsigned int __user *)(unsigned long)depth32.buffer,
- &depth->buffer)
- || __put_user((unsigned char __user *)(unsigned long)depth32.mask,
- &depth->mask))
- return -EFAULT;
-
- return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+ depth.func = depth32.func;
+ depth.n = depth32.n;
+ depth.x = compat_ptr(depth32.x);
+ depth.y = compat_ptr(depth32.y);
+ depth.buffer = compat_ptr(depth32.buffer);
+ depth.mask = compat_ptr(depth32.mask);
+ return drm_ioctl_kernel(file, r128_cce_depth, &depth, DRM_AUTH);
}
typedef struct drm_r128_stipple32 {
@@ -140,18 +131,14 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_stipple32_t stipple32;
- drm_r128_stipple_t __user *stipple;
+ drm_r128_stipple_t stipple;
if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
return -EFAULT;
- stipple = compat_alloc_user_space(sizeof(*stipple));
- if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple))
- || __put_user((unsigned int __user *)(unsigned long)stipple32.mask,
- &stipple->mask))
- return -EFAULT;
+ stipple.mask = compat_ptr(stipple32.mask);
- return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+ return drm_ioctl_kernel(file, r128_cce_stipple, &stipple, DRM_AUTH);
}
typedef struct drm_r128_getparam32 {
@@ -163,19 +150,15 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_r128_getparam32_t getparam32;
- drm_r128_getparam_t __user *getparam;
+ drm_r128_getparam_t getparam;
if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
return -EFAULT;
- getparam = compat_alloc_user_space(sizeof(*getparam));
- if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
- || __put_user(getparam32.param, &getparam->param)
- || __put_user((void __user *)(unsigned long)getparam32.value,
- &getparam->value))
- return -EFAULT;
+ getparam.param = getparam32.param;
+ getparam.value = compat_ptr(getparam32.value);
- return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+ return drm_ioctl_kernel(file, r128_getparam, &getparam, DRM_AUTH);
}
drm_ioctl_compat_t *r128_compat_ioctls[] = {
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 8fd2d9f58f77..8fdc56c1c953 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1460,7 +1460,7 @@ static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *fi
return ret;
}
-static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_depth_t *depth = data;
@@ -1492,7 +1492,7 @@ static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *f
return ret;
}
-static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_stipple_t *stipple = data;
@@ -1582,7 +1582,7 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file
return 0;
}
-static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_getparam_t *param = data;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index cf3e5985e3e7..92ccd7aed0d4 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 898f9a078830..a6511918f632 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -5451,28 +5451,6 @@ void cik_pcie_gart_tlb_flush(struct radeon_device *rdev)
WREG32(VM_INVALIDATE_REQUEST, 0x1);
}
-static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
-{
- int i;
- uint32_t sh_mem_bases, sh_mem_config;
-
- sh_mem_bases = 0x6000 | 0x6000 << 16;
- sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
-
- mutex_lock(&rdev->srbm_mutex);
- for (i = 8; i < 16; i++) {
- cik_srbm_select(rdev, 0, 0, 0, i);
- /* CP and shaders */
- WREG32(SH_MEM_CONFIG, sh_mem_config);
- WREG32(SH_MEM_APE1_BASE, 1);
- WREG32(SH_MEM_APE1_LIMIT, 0);
- WREG32(SH_MEM_BASES, sh_mem_bases);
- }
- cik_srbm_select(rdev, 0, 0, 0, 0);
- mutex_unlock(&rdev->srbm_mutex);
-}
-
/**
* cik_pcie_gart_enable - gart enable
*
@@ -5586,8 +5564,6 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
- cik_pcie_init_compute_vmid(rdev);
-
cik_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index b928c17bdeed..c21d8fa591ef 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* utility to create the register check tables
* this includes inlined list.h safe for userspace.
*
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index eb40888bdfcc..ad16a925f8d5 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#define R100_TRACK_MAX_TEXTURE 3
#define R200_TRACK_MAX_TEXTURE 6
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 1768926d2ebc..183b4b482138 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <drm/drmP.h>
#include <drm/drm_dp_mst_helper.h>
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 2fcf805d3a16..33b821d6d018 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
}
info->par = rfbdev;
- info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3386452bd2f0..cf3deb283da5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
else
r = 0;
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
drm_gem_object_put_unlocked(gobj);
return r;
@@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
r = ret;
/* Flush HDP cache via MMIO if necessary */
- cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 49750d07ab7d..611cf934b211 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 815eaa8c394b..bc26efd1793e 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _RADEON_TRACE_H_
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
index e51d3575976b..66b3d5084662 100644
--- a/drivers/gpu/drm/radeon/radeon_trace_points.c
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright Red Hat Inc 2010.
* Author : Dave Airlie <airlied@redhat.com>
*/
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8032da57e409..6ada64db00e9 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -597,7 +597,7 @@ release_sg:
kfree(ttm->sg);
release_pages:
- release_pages(ttm->pages, pinned, 0);
+ release_pages(ttm->pages, pinned);
return r;
}
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 2131e722de3b..0cf5c11030e8 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_drv.o \
rcar_du_encoder.o \
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index a881d2cc4f25..a314e2109e76 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index b15755b6129c..b1fe0639227e 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -1285,8 +1285,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
goto err_pllref;
}
- pm_runtime_enable(dev);
-
dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
dsi->dsi_host.dev = dev;
ret = mipi_dsi_host_register(&dsi->dsi_host);
@@ -1301,6 +1299,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
}
dev_set_drvdata(dev, dsi);
+ pm_runtime_enable(dev);
return 0;
err_mipi_dsi_host:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index a553e182ff53..3acfd576b7df 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -101,9 +101,9 @@ static void psr_set_state(struct psr_drv *psr, enum psr_state state)
spin_unlock_irqrestore(&psr->lock, flags);
}
-static void psr_flush_handler(unsigned long data)
+static void psr_flush_handler(struct timer_list *t)
{
- struct psr_drv *psr = (struct psr_drv *)data;
+ struct psr_drv *psr = from_timer(psr, t, flush_timer);
unsigned long flags;
/* If the state has changed since we initiated the flush, do nothing */
@@ -232,7 +232,7 @@ int rockchip_drm_psr_register(struct drm_encoder *encoder,
if (!psr)
return -ENOMEM;
- setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+ timer_setup(&psr->flush_timer, psr_flush_handler, 0);
spin_lock_init(&psr->lock);
psr->active = true;
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index 37bbdac52896..54acc117550c 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* List each unit test as selftest(name, function)
*
* The name is used as both an enum and expanded as igt__name to create
diff --git a/drivers/gpu/drm/shmobile/Makefile b/drivers/gpu/drm/shmobile/Makefile
index 4c3eeb355630..861edafed856 100644
--- a/drivers/gpu/drm/shmobile/Makefile
+++ b/drivers/gpu/drm/shmobile/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
shmob-drm-y := shmob_drm_backlight.o \
shmob_drm_crtc.o \
shmob_drm_drv.o \
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index c35db12435c3..f203ac5514ae 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
sti-drm-y := \
sti_mixer.o \
sti_gdp.o \
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 301b5b1452db..0c2f8c7facae 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
sun4i-backend-y += sun4i_backend.o sun4i_layer.o
sun4i-drm-y += sun4i_drv.o
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 8927784396e8..46d65d39214d 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
tegra-drm-y := \
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 943bdf88c4a2..52552b9b89ef 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -155,8 +155,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
order = __ffs(tegra->domain->pgsize_bitmap);
init_iova_domain(&tegra->carveout.domain, 1UL << order,
- carveout_start >> order,
- carveout_end >> order);
+ carveout_start >> order);
tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 4bcacd3f4861..b0a1dedac802 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -174,9 +174,9 @@ struct tegra_sor {
struct reset_control *rst;
struct clk *clk_parent;
- struct clk *clk_brick;
struct clk *clk_safe;
- struct clk *clk_src;
+ struct clk *clk_out;
+ struct clk *clk_pad;
struct clk *clk_dp;
struct clk *clk;
@@ -255,7 +255,7 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
clk_disable_unprepare(sor->clk);
- err = clk_set_parent(sor->clk, parent);
+ err = clk_set_parent(sor->clk_out, parent);
if (err < 0)
return err;
@@ -266,24 +266,24 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
return 0;
}
-struct tegra_clk_sor_brick {
+struct tegra_clk_sor_pad {
struct clk_hw hw;
struct tegra_sor *sor;
};
-static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
{
- return container_of(hw, struct tegra_clk_sor_brick, hw);
+ return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_brick_parents[] = {
+static const char * const tegra_clk_sor_pad_parents[] = {
"pll_d2_out0", "pll_dp"
};
-static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u32 value;
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
@@ -304,10 +304,10 @@ static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u8 parent = U8_MAX;
u32 value;
@@ -328,33 +328,33 @@ static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
return parent;
}
-static const struct clk_ops tegra_clk_sor_brick_ops = {
- .set_parent = tegra_clk_sor_brick_set_parent,
- .get_parent = tegra_clk_sor_brick_get_parent,
+static const struct clk_ops tegra_clk_sor_pad_ops = {
+ .set_parent = tegra_clk_sor_pad_set_parent,
+ .get_parent = tegra_clk_sor_pad_get_parent,
};
-static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
- const char *name)
+static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
+ const char *name)
{
- struct tegra_clk_sor_brick *brick;
+ struct tegra_clk_sor_pad *pad;
struct clk_init_data init;
struct clk *clk;
- brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
- if (!brick)
+ pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL);
+ if (!pad)
return ERR_PTR(-ENOMEM);
- brick->sor = sor;
+ pad->sor = sor;
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_brick_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
- init.ops = &tegra_clk_sor_brick_ops;
+ init.parent_names = tegra_clk_sor_pad_parents;
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.ops = &tegra_clk_sor_pad_ops;
- brick->hw.init = &init;
+ pad->hw.init = &init;
- clk = devm_clk_register(sor->dev, &brick->hw);
+ clk = devm_clk_register(sor->dev, &pad->hw);
return clk;
}
@@ -998,8 +998,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return err;
+ }
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
@@ -2007,8 +2009,10 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return;
+ }
div = clk_get_rate(sor->clk) / 1000000 * 4;
@@ -2111,13 +2115,17 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
/* switch to parent clock */
- err = clk_set_parent(sor->clk_src, sor->clk_parent);
- if (err < 0)
- dev_err(sor->dev, "failed to set source clock: %d\n", err);
-
- err = tegra_sor_set_parent_clock(sor, sor->clk_src);
- if (err < 0)
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ return;
+ }
+
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ return;
+ }
value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
@@ -2628,11 +2636,24 @@ static int tegra_sor_probe(struct platform_device *pdev)
}
if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
- sor->clk_src = devm_clk_get(&pdev->dev, "source");
- if (IS_ERR(sor->clk_src)) {
- err = PTR_ERR(sor->clk_src);
- dev_err(sor->dev, "failed to get source clock: %d\n",
- err);
+ struct device_node *np = pdev->dev.of_node;
+ const char *name;
+
+ /*
+ * For backwards compatibility with Tegra210 device trees,
+ * fall back to the old clock name "source" if the new "out"
+ * clock is not available.
+ */
+ if (of_property_match_string(np, "clock-names", "out") < 0)
+ name = "source";
+ else
+ name = "out";
+
+ sor->clk_out = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(sor->clk_out)) {
+ err = PTR_ERR(sor->clk_out);
+ dev_err(sor->dev, "failed to get %s clock: %d\n",
+ name, err);
goto remove;
}
}
@@ -2658,16 +2679,60 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
+ /*
+ * Starting with Tegra186, the BPMP provides an implementation for
+ * the pad output clock, so we have to look it up from device tree.
+ */
+ sor->clk_pad = devm_clk_get(&pdev->dev, "pad");
+ if (IS_ERR(sor->clk_pad)) {
+ if (sor->clk_pad != ERR_PTR(-ENOENT)) {
+ err = PTR_ERR(sor->clk_pad);
+ goto remove;
+ }
+
+ /*
+ * If the pad output clock is not available, then we assume
+ * we're on Tegra210 or earlier and have to provide our own
+ * implementation.
+ */
+ sor->clk_pad = NULL;
+ }
+
+ /*
+ * The bootloader may have set up the SOR such that it's module clock
+ * is sourced by one of the display PLLs. However, that doesn't work
+ * without properly having set up other bits of the SOR.
+ */
+ err = clk_set_parent(sor->clk_out, sor->clk_safe);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to use safe clock: %d\n", err);
+ goto remove;
+ }
+
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
- sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
- pm_runtime_put(&pdev->dev);
+ /*
+ * On Tegra210 and earlier, provide our own implementation for the
+ * pad output clock.
+ */
+ if (!sor->clk_pad) {
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
+ err);
+ goto remove;
+ }
+
+ sor->clk_pad = tegra_clk_sor_pad_register(sor,
+ "sor1_pad_clkout");
+ pm_runtime_put(&pdev->dev);
+ }
- if (IS_ERR(sor->clk_brick)) {
- err = PTR_ERR(sor->clk_brick);
- dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+ if (IS_ERR(sor->clk_pad)) {
+ err = PTR_ERR(sor->clk_pad);
+ dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
+ err);
goto remove;
}
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index 28fed7e206d0..81ac82455ce4 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -12,14 +12,3 @@ config DRM_TILCDC
controller, for example AM33xx in beagle-bone, DA8xx, or
OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver.
-config DRM_TILCDC_SLAVE_COMPAT
- bool "Support device tree blobs using TI LCDC Slave binding"
- depends on DRM_TILCDC
- default y
- select OF_RESOLVE
- select OF_OVERLAY
- help
- Choose this option if you need a kernel that is compatible
- with device tree blobs using the obsolete "ti,tilcdc,slave"
- binding. If you find "ti,tilcdc,slave"-string from your DTB,
- you probably need this. Otherwise you do not.
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
index 55ebd516728f..87f9480e43b0 100644
--- a/drivers/gpu/drm/tilcdc/Makefile
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -1,10 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
ccflags-y += -Werror
endif
-obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
- tilcdc_slave_compat.dtb.o
-
tilcdc-y := \
tilcdc_plane.o \
tilcdc_crtc.o \
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
deleted file mode 100644
index 482299a6f3b0..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-
-/*
- * To support the old "ti,tilcdc,slave" binding the binding has to be
- * transformed to the new external encoder binding.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/of_fdt.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-#include "tilcdc_slave_compat.h"
-
-struct kfree_table {
- int total;
- int num;
- void **table;
-};
-
-static int __init kfree_table_init(struct kfree_table *kft)
-{
- kft->total = 32;
- kft->num = 0;
- kft->table = kmalloc(kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table)
- return -ENOMEM;
-
- return 0;
-}
-
-static int __init kfree_table_add(struct kfree_table *kft, void *p)
-{
- if (kft->num == kft->total) {
- void **old = kft->table;
-
- kft->total *= 2;
- kft->table = krealloc(old, kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table) {
- kft->table = old;
- kfree(p);
- return -ENOMEM;
- }
- }
- kft->table[kft->num++] = p;
- return 0;
-}
-
-static void __init kfree_table_free(struct kfree_table *kft)
-{
- int i;
-
- for (i = 0; i < kft->num; i++)
- kfree(kft->table[i]);
-
- kfree(kft->table);
-}
-
-static
-struct property * __init tilcdc_prop_dup(const struct property *prop,
- struct kfree_table *kft)
-{
- struct property *nprop;
-
- nprop = kzalloc(sizeof(*nprop), GFP_KERNEL);
- if (!nprop || kfree_table_add(kft, nprop))
- return NULL;
-
- nprop->name = kstrdup(prop->name, GFP_KERNEL);
- if (!nprop->name || kfree_table_add(kft, nprop->name))
- return NULL;
-
- nprop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
- if (!nprop->value || kfree_table_add(kft, nprop->value))
- return NULL;
-
- nprop->length = prop->length;
-
- return nprop;
-}
-
-static void __init tilcdc_copy_props(struct device_node *from,
- struct device_node *to,
- const char * const props[],
- struct kfree_table *kft)
-{
- struct property *prop;
- int i;
-
- for (i = 0; props[i]; i++) {
- prop = of_find_property(from, props[i], NULL);
- if (!prop)
- continue;
-
- prop = tilcdc_prop_dup(prop, kft);
- if (!prop)
- continue;
-
- prop->next = to->properties;
- to->properties = prop;
- }
-}
-
-static int __init tilcdc_prop_str_update(struct property *prop,
- const char *str,
- struct kfree_table *kft)
-{
- prop->value = kstrdup(str, GFP_KERNEL);
- if (kfree_table_add(kft, prop->value) || !prop->value)
- return -ENOMEM;
- prop->length = strlen(str)+1;
- return 0;
-}
-
-static void __init tilcdc_node_disable(struct device_node *node)
-{
- struct property *prop;
-
- prop = kzalloc(sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return;
-
- prop->name = "status";
- prop->value = "disabled";
- prop->length = strlen((char *)prop->value)+1;
-
- of_update_property(node, prop);
-}
-
-static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
-{
- const int size = __dtb_tilcdc_slave_compat_end -
- __dtb_tilcdc_slave_compat_begin;
- static void *overlay_data;
- struct device_node *overlay;
- int ret;
-
- if (!size) {
- pr_warn("%s: No overlay data\n", __func__);
- return NULL;
- }
-
- overlay_data = kmemdup(__dtb_tilcdc_slave_compat_begin,
- size, GFP_KERNEL);
- if (!overlay_data || kfree_table_add(kft, overlay_data))
- return NULL;
-
- of_fdt_unflatten_tree(overlay_data, NULL, &overlay);
- if (!overlay) {
- pr_warn("%s: Unflattening overlay tree failed\n", __func__);
- return NULL;
- }
-
- ret = of_resolve_phandles(overlay);
- if (ret) {
- pr_err("%s: Failed to resolve phandles: %d\n", __func__, ret);
- return NULL;
- }
-
- return overlay;
-}
-
-static const struct of_device_id tilcdc_slave_of_match[] __initconst = {
- { .compatible = "ti,tilcdc,slave", },
- {},
-};
-
-static const struct of_device_id tilcdc_of_match[] __initconst = {
- { .compatible = "ti,am33xx-tilcdc", },
- {},
-};
-
-static const struct of_device_id tilcdc_tda998x_of_match[] __initconst = {
- { .compatible = "nxp,tda998x", },
- {},
-};
-
-static const char * const tilcdc_slave_props[] __initconst = {
- "pinctrl-names",
- "pinctrl-0",
- "pinctrl-1",
- NULL
-};
-
-static void __init tilcdc_convert_slave_node(void)
-{
- struct device_node *slave = NULL, *lcdc = NULL;
- struct device_node *i2c = NULL, *fragment = NULL;
- struct device_node *overlay, *encoder;
- struct property *prop;
- /* For all memory needed for the overlay tree. This memory can
- be freed after the overlay has been applied. */
- struct kfree_table kft;
- int ret;
-
- if (kfree_table_init(&kft))
- return;
-
- lcdc = of_find_matching_node(NULL, tilcdc_of_match);
- slave = of_find_matching_node(NULL, tilcdc_slave_of_match);
-
- if (!slave || !of_device_is_available(lcdc))
- goto out;
-
- i2c = of_parse_phandle(slave, "i2c", 0);
- if (!i2c) {
- pr_err("%s: Can't find i2c node through phandle\n", __func__);
- goto out;
- }
-
- overlay = tilcdc_get_overlay(&kft);
- if (!overlay)
- goto out;
-
- encoder = of_find_matching_node(overlay, tilcdc_tda998x_of_match);
- if (!encoder) {
- pr_err("%s: Failed to find tda998x node\n", __func__);
- goto out;
- }
-
- tilcdc_copy_props(slave, encoder, tilcdc_slave_props, &kft);
-
- for_each_child_of_node(overlay, fragment) {
- prop = of_find_property(fragment, "target-path", NULL);
- if (!prop)
- continue;
- if (!strncmp("i2c", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, i2c->full_name, &kft))
- goto out;
- if (!strncmp("lcdc", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, lcdc->full_name, &kft))
- goto out;
- }
-
- tilcdc_node_disable(slave);
-
- ret = of_overlay_create(overlay);
- if (ret)
- pr_err("%s: Creating overlay failed: %d\n", __func__, ret);
- else
- pr_info("%s: ti,tilcdc,slave node successfully converted\n",
- __func__);
-out:
- kfree_table_free(&kft);
- of_node_put(i2c);
- of_node_put(slave);
- of_node_put(lcdc);
- of_node_put(fragment);
-}
-
-static int __init tilcdc_slave_compat_init(void)
-{
- tilcdc_convert_slave_node();
- return 0;
-}
-
-subsys_initcall(tilcdc_slave_compat_init);
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts
deleted file mode 100644
index 693f8b0aea2d..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * DTS overlay for converting ti,tilcdc,slave binding to new binding.
- *
- * Copyright (C) 2015 Texas Instruments Inc.
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- */
-
-/*
- * target-path property values are simple tags that are replaced with
- * correct values in tilcdc_slave_compat.c. Some properties are also
- * copied over from the ti,tilcdc,slave node.
- */
-
-/dts-v1/;
-/ {
- fragment@0 {
- target-path = "i2c";
- __overlay__ {
- #address-cells = <1>;
- #size-cells = <0>;
- tda19988 {
- compatible = "nxp,tda998x";
- reg = <0x70>;
- status = "okay";
-
- port {
- hdmi_0: endpoint@0 {
- remote-endpoint = <&lcd_0>;
- };
- };
- };
- };
- };
-
- fragment@1 {
- target-path = "lcdc";
- __overlay__ {
- port {
- lcd_0: endpoint@0 {
- remote-endpoint = <&hdmi_0>;
- };
- };
- };
- };
-
- __local_fixups__ {
- fragment@0 {
- __overlay__ {
- tda19988 {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
- fragment@1 {
- __overlay__ {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
-};
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h
deleted file mode 100644
index 403d35d87d0b..000000000000
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-/* This header declares the symbols defined in tilcdc_slave_compat.dts */
-
-#ifndef __TILCDC_SLAVE_COMPAT_H__
-#define __TILCDC_SLAVE_COMPAT_H__
-
-extern uint8_t __dtb_tilcdc_slave_compat_begin[];
-extern uint8_t __dtb_tilcdc_slave_compat_end[];
-
-#endif /* __TILCDC_SLAVE_COMPAT_H__ */
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 4d0c938ff4b2..a60e560804e0 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 316f831ad5f0..8d7172e8381d 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -744,12 +744,14 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- if (p++ != pages[i + j])
- break;
+ if (!(flags & TTM_PAGE_FLAG_DMA32)) {
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ if (p++ != pages[i + j])
+ break;
- if (j == HPAGE_PMD_NR)
- order = HPAGE_PMD_ORDER;
+ if (j == HPAGE_PMD_NR)
+ order = HPAGE_PMD_ORDER;
+ }
#endif
if (page_count(pages[i]) != 1)
@@ -865,20 +867,22 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- while (npages >= HPAGE_PMD_NR) {
- gfp_t huge_flags = gfp_flags;
+ if (!(gfp_flags & GFP_DMA32)) {
+ while (npages >= HPAGE_PMD_NR) {
+ gfp_t huge_flags = gfp_flags;
- huge_flags |= GFP_TRANSHUGE;
- huge_flags &= ~__GFP_MOVABLE;
- huge_flags &= ~__GFP_COMP;
- p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
- if (!p)
- break;
+ huge_flags |= GFP_TRANSHUGE;
+ huge_flags &= ~__GFP_MOVABLE;
+ huge_flags &= ~__GFP_COMP;
+ p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+ if (!p)
+ break;
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- pages[i++] = p++;
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[i++] = p++;
- npages -= HPAGE_PMD_NR;
+ npages -= HPAGE_PMD_NR;
+ }
}
#endif
@@ -1058,7 +1062,6 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
{
unsigned i, j;
@@ -1129,7 +1132,6 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
ttm_pool_unpopulate(&tt->ttm);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-#endif
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
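
The ttm_page_alloc hunks above restrict transparent-huge-page handling to pools that are not limited to the 32-bit DMA zone, both when returning pages and when allocating them. A simplified sketch of the allocation-side guard, with a hypothetical helper name and without the surrounding pool bookkeeping:

#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* Hypothetical helper: only attempt a huge-page allocation outside DMA32. */
static struct page *example_try_huge_alloc(gfp_t gfp_flags)
{
	gfp_t huge_flags;

	if (gfp_flags & GFP_DMA32)
		return NULL;	/* DMA32 requests stay on single pages */

	huge_flags = gfp_flags | GFP_TRANSHUGE;
	huge_flags &= ~(__GFP_MOVABLE | __GFP_COMP);

	return alloc_pages(huge_flags, HPAGE_PMD_ORDER);
}
#endif
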
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index 719a771f3d5c..f5500df51686 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
# Please keep these build lists sorted!
# core driver code
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ab3bcbe0bb93..984501e3f0b0 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -423,7 +423,8 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
vc4_encoder->limited_rgb_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
- vc4_encoder->rgb_range_selectable);
+ vc4_encoder->rgb_range_selectable,
+ false);
vc4_hdmi_write_infoframe(encoder, &frame);
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 7d7af3a93d94..61b2e5377993 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -208,6 +208,9 @@ vc4_irq_postinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ /* Undo the effects of a previous vc4_irq_uninstall. */
+ enable_irq(dev->irq);
+
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
@@ -225,6 +228,9 @@ vc4_irq_uninstall(struct drm_device *dev)
/* Clear any pending interrupts we might have left. */
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+ /* Finish any interrupt handler still in flight. */
+ disable_irq(dev->irq);
+
cancel_work_sync(&vc4->overflow_mem_work);
}
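
The vc4_irq hunks above add a disable_irq() to uninstall and a matching enable_irq() to postinstall: disable_irq() waits for any handler still running on another CPU, and the re-enable undoes that on the next install. A sketch of the pairing, with hypothetical function names:

#include <linux/interrupt.h>

/* Hypothetical teardown: mask device sources, then quiesce the IRQ line. */
static void example_irq_uninstall(int irq)
{
	/* ... mask device-level interrupt sources here ... */
	disable_irq(irq);	/* waits for in-flight handlers to return */
}

/* Hypothetical (re)install: undo a previous uninstall before unmasking. */
static void example_irq_postinstall(int irq)
{
	enable_irq(irq);
	/* ... unmask device-level interrupt sources here ... */
}
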
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 8fd52f211e9d..b28876c222b4 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -85,9 +85,9 @@ static const struct dma_fence_ops vgem_fence_ops = {
.timeline_value_str = vgem_fence_timeline_value_str,
};
-static void vgem_fence_timeout(unsigned long data)
+static void vgem_fence_timeout(struct timer_list *t)
{
- struct vgem_fence *fence = (struct vgem_fence *)data;
+ struct vgem_fence *fence = from_timer(fence, t, timer);
dma_fence_signal(&fence->base);
}
@@ -105,7 +105,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
dma_fence_context_alloc(1), 1);
- setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+ timer_setup(&fence->timer, vgem_fence_timeout, 0);
/* We force the fence to expire within 10s to prevent driver hangs */
mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 98aae9809249..d6e84a589ef1 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -238,9 +238,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
if (NULL == vsg->pages)
return -ENOMEM;
- ret = get_user_pages_unlocked((unsigned long)xfer->mem_addr,
- vsg->num_pages, vsg->pages,
- (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0);
+ ret = get_user_pages_fast((unsigned long)xfer->mem_addr,
+ vsg->num_pages, vsg->direction == DMA_FROM_DEVICE,
+ vsg->pages);
if (ret != vsg->num_pages) {
if (ret < 0)
return ret;
@@ -452,9 +452,9 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
static void
-via_dmablit_timer(unsigned long data)
+via_dmablit_timer(struct timer_list *t)
{
- drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+ drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
@@ -559,8 +559,7 @@ via_init_dmablit(struct drm_device *dev)
init_waitqueue_head(blitq->blit_queue + j);
init_waitqueue_head(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
- setup_timer(&blitq->poll_timer, via_dmablit_timer,
- (unsigned long)blitq);
+ timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
}
}
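
Both the vgem and via hunks above convert from setup_timer() with an unsigned long cookie to timer_setup() plus from_timer(), where the callback recovers its containing structure from the timer pointer. A generic sketch of the conversion, with hypothetical structure and function names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_dev {
	struct timer_list poll_timer;
	/* ... other state the callback needs ... */
};

/* New-style callback: the timer pointer identifies the containing object. */
static void example_timer_fn(struct timer_list *t)
{
	struct example_dev *edev = from_timer(edev, t, poll_timer);

	/* ... handle the timeout using edev, e.g. re-arm the timer ... */
	mod_timer(&edev->poll_timer, jiffies + HZ);
}

static void example_arm_timer(struct example_dev *edev)
{
	/* Replaces setup_timer(&edev->poll_timer, fn, (unsigned long)edev). */
	timer_setup(&edev->poll_timer, example_timer_fn, 0);
	mod_timer(&edev->poll_timer, jiffies + HZ);
}
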
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 7684f613bdc3..f29deec83d1f 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index a365330bbb82..ad80211e1098 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
diff --git a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
index 120eab830eaf..3a195e8106b3 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/vm_basic_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VM_BASIC_TYPES_H_
#define _VM_BASIC_TYPES_H_
#include <linux/kernel.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e84fee3ec4f3..184340d486c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -721,7 +721,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 2;
+ mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 3bbad22b3748..d6b1c509ae01 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -224,7 +224,7 @@ out:
return ret;
}
-static struct dma_fence_ops vmw_fence_ops = {
+static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index a552e4ea5440..6ac094ee8983 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
- if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
+ if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
return -EACCES;
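
The vmwgfx hunk above replaces the retired ACCESS_ONCE() with READ_ONCE(), which reads the shared pointer exactly once and without tearing. A minimal sketch with hypothetical names:

#include <linux/compiler.h>
#include <linux/types.h>

struct example_fpriv {
	void *locked_master;	/* may be updated concurrently elsewhere */
};

/* Hypothetical check: take a single snapshot of the shared pointer. */
static bool example_has_locked_master(struct example_fpriv *fpriv)
{
	return READ_ONCE(fpriv->locked_master) != NULL;
}
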
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
index 9df7766a7f9d..b6d966d849dd 100644
--- a/drivers/gpu/drm/zte/Makefile
+++ b/drivers/gpu/drm/zte/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
zxdrm-y := \
zx_drm_drv.o \
zx_hdmi.o \
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index 4fb61bd57aee..b92016ce09b7 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
host1x-y = \
bus.o \
syncpt.o \
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 66ea5acee820..2e57c9cea696 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -320,6 +320,7 @@ struct bus_type host1x_bus_type = {
.name = "host1x",
.match = host1x_device_match,
.pm = &host1x_device_pm_ops,
+ .force_dma = true,
};
static void __host1x_device_del(struct host1x_device *device)
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 773d6337aa30..bf67c3aeb634 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -239,8 +239,7 @@ static int host1x_probe(struct platform_device *pdev)
order = __ffs(host->domain->pgsize_bitmap);
init_iova_domain(&host->iova, 1UL << order,
- geometry->aperture_start >> order,
- geometry->aperture_end >> order);
+ geometry->aperture_start >> order);
host->iova_end = geometry->aperture_end;
}
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 8cdf9e4ae772..7cc8b47e488b 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 7a4b8362dda8..49bfe6e7d005 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -249,11 +249,8 @@ EXPORT_SYMBOL_GPL(ipu_dc_enable);
void ipu_dc_enable_channel(struct ipu_dc *dc)
{
- int di;
u32 reg;
- di = dc->di;
-
reg = readl(dc->base + DC_WR_CH_CONF);
reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
writel(reg, dc->base + DC_WR_CH_CONF);