From 2ab7e274b86739f4ceed5d94b6879f2d07b2802f Mon Sep 17 00:00:00 2001
From: Yintian Tao
Date: Fri, 28 Feb 2020 14:24:42 +0800
Subject: drm/amdgpu: clean wptr on wb when gpu recovery
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

TDR would randomly fail due to a compute ring test failure. If the
compute ring wptr & 0x7ff (ring_buf_mask) is 0x100, then after the MQD
is mapped the compute ring rptr is synced to 0x100. The ring test
packet size is also 0x100, so after amdgpu_ring_commit the CP never
actually handles the packet on the ring buffer, because rptr is equal
to wptr.

Signed-off-by: Yintian Tao
Acked-by: Christian König
Reviewed-by: Monk Liu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 1 +
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  | 1 +
 2 files changed, 2 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 22bbb36c768e..ced29790217c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3513,6 +3513,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
 
 		/* reset ring buffer */
 		ring->wptr = 0;
+		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
 		amdgpu_ring_clear_ring(ring);
 	} else {
 		amdgpu_ring_clear_ring(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3afdbbd6aaad..889154a78c4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3663,6 +3663,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 
 		/* reset ring buffer */
 		ring->wptr = 0;
+		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
 		amdgpu_ring_clear_ring(ring);
 	} else {
 		amdgpu_ring_clear_ring(ring);
-- 
cgit v1.2.3
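The failure mode above reduces to plain index arithmetic. Below is a
standalone sketch, not driver code, using only the 0x7ff ring_buf_mask and
0x100 packet size quoted in the commit message, to show why the CP sees an
empty ring until the stale writeback wptr is cleared:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t ring_buf_mask = 0x7ff; /* 2048-dword ring */
        uint32_t stale_wb_wptr = 0x100;       /* leftover writeback value */
        uint32_t test_pkt_size = 0x100;       /* ring test packet size */

        /* After the MQD is mapped, rptr is synced from the stale wptr. */
        uint32_t rptr = stale_wb_wptr & ring_buf_mask;

        /* The ring test starts from wptr = 0 and commits 0x100 dwords. */
        uint32_t wptr = (0 + test_pkt_size) & ring_buf_mask;

        /* rptr == wptr looks like an empty ring; the CP does nothing. */
        printf("rptr=0x%x wptr=0x%x -> %s\n", rptr, wptr,
               rptr == wptr ? "CP skips the packet" : "CP fetches the packet");
        return 0;
    }

Zeroing the writeback slot together with ring->wptr keeps the synced rptr
at 0, so committing the test packet makes wptr differ from rptr and the
packet is actually fetched.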
From 194bcf35bce4a236059816bc41b3db9c9c92a1bb Mon Sep 17 00:00:00 2001
From: "Tianci.Yin"
Date: Fri, 28 Feb 2020 17:10:21 +0800
Subject: drm/amdgpu: disable 3D pipe 1 on Navi1x

[why]
The CP firmware decided to skip setting the state for 3D pipe 1 on
Navi1x, as there is no use case for it.

[how]
Disable 3D pipe 1 on Navi1x.

Reviewed-by: Feifei Xu
Reviewed-by: Monk Liu
Signed-off-by: Tianci.Yin
Signed-off-by: Alex Deucher
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 97 ++++++++++++++++++----------------
 1 file changed, 51 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ced29790217c..02702597ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -52,7 +52,7 @@
  * 1. Primary ring
  * 2. Async ring
  */
-#define GFX10_NUM_GFX_RINGS	2
+#define GFX10_NUM_GFX_RINGS_NV1X	1
 #define GFX10_MEC_HPD_SIZE	2048
 
 #define F32_CE_PROGRAM_RAM_SIZE		65536
@@ -1304,7 +1304,7 @@ static int gfx_v10_0_sw_init(void *handle)
 	case CHIP_NAVI14:
 	case CHIP_NAVI12:
 		adev->gfx.me.num_me = 1;
-		adev->gfx.me.num_pipe_per_me = 2;
+		adev->gfx.me.num_pipe_per_me = 1;
 		adev->gfx.me.num_queue_per_pipe = 1;
 		adev->gfx.mec.num_mec = 2;
 		adev->gfx.mec.num_pipe_per_mec = 4;
@@ -2710,18 +2710,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
 	amdgpu_ring_commit(ring);
 
 	/* submit cs packet to copy state 0 to next available state */
-	ring = &adev->gfx.gfx_ring[1];
-	r = amdgpu_ring_alloc(ring, 2);
-	if (r) {
-		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
-		return r;
-	}
-
-	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
-	amdgpu_ring_write(ring, 0);
+	if (adev->gfx.num_gfx_rings > 1) {
+		/* maximum supported gfx ring is 2 */
+		ring = &adev->gfx.gfx_ring[1];
+		r = amdgpu_ring_alloc(ring, 2);
+		if (r) {
+			DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+			return r;
+		}
 
-	amdgpu_ring_commit(ring);
+		amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+		amdgpu_ring_write(ring, 0);
+		amdgpu_ring_commit(ring);
+	}
 
 	return 0;
 }
@@ -2818,39 +2820,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
 	mutex_unlock(&adev->srbm_mutex);
 
 	/* Init gfx ring 1 for pipe 1 */
-	mutex_lock(&adev->srbm_mutex);
-	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
-	ring = &adev->gfx.gfx_ring[1];
-	rb_bufsz = order_base_2(ring->ring_size / 8);
-	tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
-	tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
-	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
-	/* Initialize the ring buffer's write pointers */
-	ring->wptr = 0;
-	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
-	WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
-	/* Set the wb address wether it's enabled or not */
-	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
-	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
-	WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
-		     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
-	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
-		     lower_32_bits(wptr_gpu_addr));
-	WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
-		     upper_32_bits(wptr_gpu_addr));
-
-	mdelay(1);
-	WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
-
-	rb_addr = ring->gpu_addr >> 8;
-	WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
-	WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
-	WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
-
-	gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
-	mutex_unlock(&adev->srbm_mutex);
-
+	if (adev->gfx.num_gfx_rings > 1) {
+		mutex_lock(&adev->srbm_mutex);
+		gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
+		/* maximum supported gfx ring is 2 */
+		ring = &adev->gfx.gfx_ring[1];
+		rb_bufsz = order_base_2(ring->ring_size / 8);
+		tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
+		tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
+		WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+		/* Initialize the ring buffer's write pointers */
+		ring->wptr = 0;
+		WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
+		WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
+		/* Set the wb address wether it's enabled or not */
+		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+		WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+		WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
+			     CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
+		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+		WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
+			     lower_32_bits(wptr_gpu_addr));
+		WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
+			     upper_32_bits(wptr_gpu_addr));
+
+		mdelay(1);
+		WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+
+		rb_addr = ring->gpu_addr >> 8;
+		WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
+		WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
+		WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
+
+		gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+		mutex_unlock(&adev->srbm_mutex);
+	}
 	/* Switch to pipe 0 */
 	mutex_lock(&adev->srbm_mutex);
 	gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
@@ -3967,7 +3971,8 @@ static int gfx_v10_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
+	adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
+
 	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
 
 	gfx_v10_0_set_kiq_pm4_funcs(adev);
-- 
cgit v1.2.3
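With GFX10_NUM_GFX_RINGS_NV1X set to 1, gfx_ring[1] is never initialized,
so every use of the second ring has to be gated on num_gfx_rings, exactly
as the hunks above do. A minimal standalone sketch of that idiom (stub
types; illustrative only, not driver code):

    #include <stdio.h>

    #define GFX10_NUM_GFX_RINGS_NV1X 1 /* was GFX10_NUM_GFX_RINGS 2 */

    struct ring { int id; };

    int main(void)
    {
        struct ring gfx_ring[2] = { { 0 }, { 1 } };
        int num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;

        printf("init gfx ring %d\n", gfx_ring[0].id);

        /* Mirrors the "if (adev->gfx.num_gfx_rings > 1)" guards above:
         * on Navi1x this branch is skipped and pipe 1 stays untouched. */
        if (num_gfx_rings > 1)
            printf("init gfx ring %d\n", gfx_ring[1].id);

        return 0;
    }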
From 5ac7fd2f597b88ee81f4748ee50cab06192a8dc3 Mon Sep 17 00:00:00 2001
From: Bhawanpreet Lakha
Date: Thu, 20 Feb 2020 11:16:14 -0500
Subject: drm/amd/display: Clear link settings on MST disable connector

[Why]
If we have a single MST display and we disconnect it, we don't disable
that link, so the old link settings still exist. On a replug of an MST
display we then treat it as a link loss and try to reallocate the MST
payload, which fails and throws the warning below.

[ 129.374192] [drm] Failed to updateMST allocation table forpipe idx:0
[ 129.374206] ------------[ cut here ]------------
[ 129.374284] WARNING: CPU: 14 PID: 1710 at drivers/gpu/drm/amd/amdgpu/../dal-dev/dc/core/dc_link.c:3153 dc_link_allocate_mst_payload+0x1f7/0x220 [amdgpu]
[ 129.374285] Modules linked in: amdgpu(OE) amd_iommu_v2 gpu_sched ttm drm_kms_helper drm fb_sys_fops syscopyarea sysfillrect sysimgblt binfmt_misc nls_iso8859_1 edac_mce_amd snd_hda_codec_realtek snd_hda_codec_generic ledtrig_audio kvm snd_hda_codec_hdmi snd_hda_intel snd_intel_nhlt snd_hda_codec irqbypass snd_hda_core snd_hwdep snd_pcm snd_seq_midi snd_seq_midi_event snd_rawmidi crct10dif_pclmul snd_seq crc32_pclmul ghash_clmulni_intel snd_seq_device snd_timer snd aesni_intel eeepc_wmi crypto_simd asus_wmi joydev cryptd sparse_keymap input_leds soundcore video glue_helper wmi_bmof mxm_wmi k10temp ccp mac_hid sch_fq_codel parport_pc ppdev lp parport ip_tables x_tables autofs4 hid_generic usbhid hid igb i2c_algo_bit ahci dca i2c_piix4 libahci gpio_amdpt wmi gpio_generic
[ 129.374318] CPU: 14 PID: 1710 Comm: kworker/14:2 Tainted: G W OE 5.4.0-rc7bhawan+ #480
[ 129.374318] Hardware name: System manufacturer System Product Name/PRIME X370-PRO, BIOS 0515 03/30/2017
[ 129.374397] Workqueue: events dm_irq_work_func [amdgpu]
[ 129.374468] RIP: 0010:dc_link_allocate_mst_payload+0x1f7/0x220 [amdgpu]
[ 129.374470] Code: 52 20 e8 1c 63 ad f4 48 8b 5d d0 65 48 33 1c 25 28 00 00 00 b8 01 00 00 00 75 16 48 8d 65 d8 5b 41 5c 41 5d 41 5e 41 5f 5d c3 <0f> 0b e9 fa fe ff ff e8 ed 5b d6 f3 41 0f b6 b6 c4 02 00 00 48 c7
[ 129.374471] RSP: 0018:ffff9f9141e7fcc0 EFLAGS: 00010246
[ 129.374472] RAX: 0000000000000000 RBX: ffff91ef0762f800 RCX: 0000000000000000
[ 129.374473] RDX: 0000000000000005 RSI: ffffffffc0c4a988 RDI: 0000000000000004
[ 129.374474] RBP: ffff9f9141e7fd10 R08: 0000000000000005 R09: 0000000000000000
[ 129.374475] R10: 0000000000000002 R11: 0000000000000001 R12: ffff91eebd510c00
[ 129.374475] R13: ffff91eebd510e58 R14: ffff91ef052c01b8 R15: 0000000000000006
[ 129.374476] FS: 0000000000000000(0000) GS:ffff91ef0ef80000(0000) knlGS:0000000000000000
[ 129.374477] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 129.374478] CR2: 000055623ea01d50 CR3: 0000000408a8c000 CR4: 00000000003406e0
[ 129.374479] Call Trace:
[ 129.374550]  dc_link_reallocate_mst_payload+0x12e/0x150 [amdgpu]
[ 129.374617]  dc_link_handle_hpd_rx_irq+0x6d4/0x6e0 [amdgpu]
[ 129.374693]  handle_hpd_rx_irq+0x77/0x310 [amdgpu]
[ 129.374768]  dm_irq_work_func+0x53/0x70 [amdgpu]
[ 129.374774]  process_one_work+0x1fd/0x3f0
[ 129.374776]  worker_thread+0x255/0x410
[ 129.374778]  kthread+0x121/0x140
[ 129.374780]  ? process_one_work+0x3f0/0x3f0
[ 129.374781]  ? kthread_park+0x90/0x90
[ 129.374785]  ret_from_fork+0x22/0x40

[How]
When we disable MST, clear the current link settings (lane_count = 0 is
good enough). This keeps us from reallocating payloads earlier than
expected and avoids the warning.

Signed-off-by: Bhawanpreet Lakha
Reviewed-by: Hersen Wu
Acked-by: Rodrigo Siqueira
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 5672f7765919..da73161043d5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -451,6 +451,7 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 			aconnector->dc_sink);
 		dc_sink_release(aconnector->dc_sink);
 		aconnector->dc_sink = NULL;
+		aconnector->dc_link->cur_link_settings.lane_count = 0;
 	}
 
 	drm_connector_unregister(connector);
-- 
cgit v1.2.3
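The one-line fix works because a link whose lane_count is zero reads as
never trained, so a later hot plug is not mistaken for link loss on stale
settings. A simplified standalone model (the field names follow the patch;
the predicate is an assumption for illustration, not the actual dc
function):

    #include <stdbool.h>
    #include <stdio.h>

    struct link_settings { int lane_count; };
    struct dc_link { struct link_settings cur_link_settings; };

    /* Hypothetical stand-in for the link-loss test: stale non-zero
     * settings make a replug look like a link loss. */
    static bool looks_like_link_loss(const struct dc_link *link)
    {
        return link->cur_link_settings.lane_count != 0;
    }

    int main(void)
    {
        struct dc_link link = { .cur_link_settings = { .lane_count = 4 } };

        /* What the patch does on MST connector destroy: */
        link.cur_link_settings.lane_count = 0;

        printf("replug treated as link loss: %s\n",
               looks_like_link_loss(&link) ? "yes" : "no");
        return 0;
    }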
From a0275dfc82c9034eefbeffd556cca6dd239d7925 Mon Sep 17 00:00:00 2001
From: Josip Pavic
Date: Fri, 21 Feb 2020 12:26:19 -0500
Subject: drm/amd/display: fix dcc swath size calculations on dcn1

[Why]
Swath sizes are being calculated incorrectly. The horizontal swath size
should be the product of block height, viewport width, and bytes per
element, but the calculation uses viewport height instead of width. The
vertical swath size is similarly miscalculated. As a result, we report
the wrong DCC caps.

[How]
Use viewport width in the horizontal swath size calculation and
viewport height in the vertical swath size calculation.

Signed-off-by: Josip Pavic
Reviewed-by: Aric Cyr
Acked-by: Rodrigo Siqueira
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index f36a0d8cedfe..446ba0a7a4b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -840,8 +840,8 @@ static void hubbub1_det_request_size(
 
 	hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
 
-	swath_bytes_horz_wc = height * blk256_height * bpe;
-	swath_bytes_vert_wc = width * blk256_width * bpe;
+	swath_bytes_horz_wc = width * blk256_height * bpe;
+	swath_bytes_vert_wc = height * blk256_width * bpe;
 
 	*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
 			false : /* full 256B request */
-- 
cgit v1.2.3
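The corrected swath math is easy to sanity-check in isolation: a
horizontal swath spans the viewport width at block height, and a vertical
swath spans the viewport height at block width. A standalone sketch (the
block dimensions and bpe are made-up example values, not dcn1 constants):

    #include <stdio.h>

    int main(void)
    {
        unsigned int width = 1920, height = 1080;   /* viewport */
        unsigned int blk256_width = 16, blk256_height = 16;
        unsigned int bpe = 4;                       /* bytes per element */

        /* As fixed by the patch: viewport width pairs with block height
         * and viewport height pairs with block width. */
        unsigned int swath_bytes_horz_wc = width * blk256_height * bpe;
        unsigned int swath_bytes_vert_wc = height * blk256_width * bpe;

        printf("horz swath: %u bytes, vert swath: %u bytes\n",
               swath_bytes_horz_wc, swath_bytes_vert_wc);
        return 0;
    }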
From 80381d40c9bf5218db06a7d7246c5478c95987ee Mon Sep 17 00:00:00 2001
From: Prike Liang
Date: Mon, 2 Mar 2020 09:36:15 +0800
Subject: drm/amd/powerplay: fix pre-check condition for setting clock range

This fix handles an MP1 firmware issue where, for example, the mclk DPM
table on Renoir has a reversed DPM clock layout and a zero-frequency DPM
level, as in the following case:

cat pp_dpm_mclk
0: 1200Mhz
1: 1200Mhz
2: 800Mhz
3: 0Mhz

Signed-off-by: Prike Liang
Reviewed-by: Evan Quan
Signed-off-by: Alex Deucher
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 2 +-
 drivers/gpu/drm/amd/powerplay/smu_v12_0.c  | 3 ---
 2 files changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 99ad4ddbe12f..ad8e9b5628e4 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -222,7 +222,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
 {
 	int ret = 0;
 
-	if (min <= 0 && max <= 0)
+	if (min < 0 && max < 0)
 		return -EINVAL;
 
 	if (!smu_clk_dpm_is_enabled(smu, clk_type))
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 870e6db2907e..518e6597bf2d 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -458,9 +458,6 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
 {
 	int ret = 0;
 
-	if (max < min)
-		return -EINVAL;
-
 	switch (clk_type) {
 	case SMU_GFXCLK:
 	case SMU_SCLK:
-- 
cgit v1.2.3
From ab65a371dd5f5cba6bd9a58a1a6d4115a71cc5c9 Mon Sep 17 00:00:00 2001
From: Prike Liang
Date: Wed, 4 Mar 2020 10:36:21 +0800
Subject: drm/amd/powerplay: map mclk to fclk for COMBINATIONAL_BYPASS case

When COMBINATIONAL_BYPASS is hit, the mclk is bypassed, so export the
fclk frequency for user queries instead.

Signed-off-by: Prike Liang
Reviewed-by: Evan Quan
Signed-off-by: Alex Deucher
Cc: stable@vger.kernel.org
---
 drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 861e6410363b..568c041c2206 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -111,8 +111,8 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
 	CLK_MAP(GFXCLK, CLOCK_GFXCLK),
 	CLK_MAP(SCLK,	CLOCK_GFXCLK),
 	CLK_MAP(SOCCLK, CLOCK_SOCCLK),
-	CLK_MAP(UCLK, CLOCK_UMCCLK),
-	CLK_MAP(MCLK, CLOCK_UMCCLK),
+	CLK_MAP(UCLK, CLOCK_FCLK),
+	CLK_MAP(MCLK, CLOCK_FCLK),
 };
 
 static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -280,7 +280,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
 		break;
 	case SMU_MCLK:
 		count = NUM_MEMCLK_DPM_LEVELS;
-		cur_value = metrics.ClockFrequency[CLOCK_UMCCLK];
+		cur_value = metrics.ClockFrequency[CLOCK_FCLK];
 		break;
 	case SMU_DCEFCLK:
 		count = NUM_DCFCLK_DPM_LEVELS;
-- 
cgit v1.2.3
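The relaxed pre-check in the clock-range patch above matters because a
zero-frequency level, as in the quoted Renoir mclk table, is now
legitimate input: only a request where both bounds are negative is
rejected. A standalone sketch of the new condition (returns -1 in place
of -EINVAL; illustrative only):

    #include <stdio.h>

    static int check_freq_range(int min, int max)
    {
        if (min < 0 && max < 0) /* was: min <= 0 && max <= 0 */
            return -1;          /* -EINVAL in the driver */
        return 0;
    }

    int main(void)
    {
        printf("min=0, max=0   -> %d (now accepted)\n",
               check_freq_range(0, 0));
        printf("min=-1, max=-1 -> %d (still rejected)\n",
               check_freq_range(-1, -1));
        return 0;
    }

The companion hunk drops the max < min check in smu_v12_0 for the same
reason: with a reversed DPM table layout the caller may legitimately pass
bounds that are not ordered.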
From 09ed6ba43e659474878b22d40b141a01d09ec857 Mon Sep 17 00:00:00 2001
From: Hersen Wu
Date: Thu, 13 Feb 2020 10:50:13 -0500
Subject: drm/amdgpu/display: navi1x copy dcn watermark clock settings to smu resume from s3 (v2)

This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
on the Windows driver dc implementation.

For Navi1x, the clock settings of the dcn watermarks are fixed. The
settings should be passed to smu during boot up and resume from s3.

boot up: dc calculates the dcn watermark clock settings within
dc_create, dcn20_resource_construct, then calls the pplib functions
below to pass the settings to smu:

	smu_set_watermarks_for_clock_ranges
	smu_set_watermarks_table
	navi10_set_watermarks_table
	smu_write_watermarks_table

For Renoir, the clock settings of the dcn watermarks are also fixed
values. dc has implemented a different flow for the Windows driver:

	dc_hardware_init / dc_set_power_state
	dcn10_init_hw
	notify_wm_ranges
	set_wm_ranges

For Linux:

	smu_set_watermarks_for_clock_ranges
	renoir_set_watermarks_table
	smu_write_watermarks_table

	dc_hardware_init -> amdgpu_dm_init
	dc_set_power_state --> dm_resume

Therefore, the Linux dc-pplib interface of navi10/12/14 is different
from that of Renoir.

v2: add missing unlock in error case

Signed-off-by: Hersen Wu
Reviewed-by: Evan Quan
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 69 +++++++++++++++++++++++
 1 file changed, 69 insertions(+)

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e8f66fbf399e..e997251a8b57 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1422,6 +1422,73 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 		drm_kms_helper_hotplug_event(dev);
 }
 
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+	struct smu_context *smu = &adev->smu;
+	int ret = 0;
+
+	if (!is_support_sw_smu(adev))
+		return 0;
+
+	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
+	 * on window driver dc implementation.
+	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
+	 * should be passed to smu during boot up and resume from s3.
+	 * boot up: dc calculate dcn watermark clock settings within dc_create,
+	 * dcn20_resource_construct
+	 * then call pplib functions below to pass the settings to smu:
+	 * smu_set_watermarks_for_clock_ranges
+	 * smu_set_watermarks_table
+	 * navi10_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Renoir, clock settings of dcn watermark are also fixed values.
+	 * dc has implemented different flow for window driver:
+	 * dc_hardware_init / dc_set_power_state
+	 * dcn10_init_hw
+	 * notify_wm_ranges
+	 * set_wm_ranges
+	 * -- Linux
+	 * smu_set_watermarks_for_clock_ranges
+	 * renoir_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Linux,
+	 * dc_hardware_init -> amdgpu_dm_init
+	 * dc_set_power_state --> dm_resume
+	 *
+	 * therefore, this function apply to navi10/12/14 but not Renoir
+	 * *
+	 */
+	switch(adev->asic_type) {
+	case CHIP_NAVI10:
+	case CHIP_NAVI14:
+	case CHIP_NAVI12:
+		break;
+	default:
+		return 0;
+	}
+
+	mutex_lock(&smu->mutex);
+
+	/* pass data to smu controller */
+	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+			!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+		ret = smu_write_watermarks_table(smu);
+
+		if (ret) {
+			mutex_unlock(&smu->mutex);
+			DRM_ERROR("Failed to update WMTABLE!\n");
+			return ret;
+		}
+		smu->watermarks_bitmap |= WATERMARKS_LOADED;
+	}
+
+	mutex_unlock(&smu->mutex);
+
+	return 0;
+}
+
 /**
  * dm_hw_init() - Initialize DC device
  * @handle: The base driver device containing the amdgpu_dm device.
@@ -1700,6 +1767,8 @@ static int dm_resume(void *handle)
 
 	amdgpu_dm_irq_resume_late(adev);
 
+	amdgpu_dm_smu_write_watermarks_table(adev);
+
 	return 0;
 }
-- 
cgit v1.2.3
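The core of the resume path added above is the EXIST/LOADED bitmap gating
around smu_write_watermarks_table. A standalone model of that logic (the
bit values are placeholders, not the driver's definitions):

    #include <stdio.h>

    #define WATERMARKS_EXIST  (1u << 0)
    #define WATERMARKS_LOADED (1u << 1)

    /* Write the table only when watermarks exist but have not yet been
     * pushed to the SMU, then mark them loaded so the write is not
     * repeated, mirroring amdgpu_dm_smu_write_watermarks_table(). */
    static int write_watermarks(unsigned int *bitmap)
    {
        if ((*bitmap & WATERMARKS_EXIST) && !(*bitmap & WATERMARKS_LOADED)) {
            /* smu_write_watermarks_table(smu) would run here, under
             * smu->mutex, in the real driver. */
            *bitmap |= WATERMARKS_LOADED;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned int bitmap = WATERMARKS_EXIST;

        printf("first call:  wrote=%d\n", write_watermarks(&bitmap));
        printf("second call: wrote=%d\n", write_watermarks(&bitmap));
        return 0;
    }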