Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/smu_v11_0.c'):
 drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 461 +++++++++++++++++---------
 1 file changed, 317 insertions(+), 144 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index f24983a8876d..a7336556dc36 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -35,7 +35,6 @@
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
-#include "amd_pcie.h"
#include "amdgpu_ras.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
@@ -60,9 +59,12 @@ MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin");
+MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
#define SMU11_VOLTAGE_SCALE 4
+#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 /* 500 ms */
+
static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
@@ -172,8 +174,12 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
case CHIP_SIENNA_CICHLID:
chip_name = "sienna_cichlid";
break;
+ case CHIP_NAVY_FLOUNDER:
+ chip_name = "navy_flounder";
+ break;
default:
- BUG();
+ dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
+ return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
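
As a side note, the firmware path requested in this hunk follows directly from the chip name chosen in the switch above. A minimal standalone sketch, with an illustrative buffer size and the newly added chip name:

#include <stdio.h>

int main(void)
{
	const char *chip_name = "navy_flounder";	/* as selected above */
	char fw_name[64];				/* illustrative size */

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
	printf("%s\n", fw_name);	/* amdgpu/navy_flounder_smc.bin */
	return 0;
}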
@@ -303,6 +309,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
case CHIP_SIENNA_CICHLID:
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
break;
+ case CHIP_NAVY_FLOUNDER:
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
+ break;
default:
dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
@@ -384,7 +393,8 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
version_major = le16_to_cpu(hdr->header.header_version_major);
version_minor = le16_to_cpu(hdr->header.header_version_minor);
- if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
+ if ((version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) ||
+ adev->asic_type == CHIP_NAVY_FLOUNDER) {
dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
switch (version_minor) {
case 0:
@@ -816,6 +826,11 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ /* Navy_Flounder does not currently support changing the display count */
+ if (adev->asic_type == CHIP_NAVY_FLOUNDER)
+ return 0;
if (!smu->pm_enabled)
return ret;
@@ -1083,63 +1098,12 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return 0;
}
-int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
- enum smu_clk_type clk_id,
- uint32_t *value)
-{
- int ret = 0;
- uint32_t freq = 0;
-
- if (clk_id >= SMU_CLK_COUNT || !value)
- return -EINVAL;
-
- ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
- if (ret)
- return ret;
-
- freq *= 100;
- *value = freq;
-
- return ret;
-}
-
int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
- int ret = 0;
- struct smu_temperature_range range;
- struct amdgpu_device *adev = smu->adev;
-
- memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
-
- ret = smu_get_thermal_temperature_range(smu, &range);
- if (ret)
- return ret;
+ if (smu->smu_table.thermal_controller_type)
+ return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
- if (smu->smu_table.thermal_controller_type) {
- ret = smu_set_thermal_range(smu, range);
- if (ret)
- return ret;
-
- ret = amdgpu_irq_get(adev, &smu->irq_source, 0);
- if (ret)
- return ret;
-
- ret = smu_set_thermal_fan_table(smu);
- if (ret)
- return ret;
- }
-
- adev->pm.dpm.thermal.min_temp = range.min;
- adev->pm.dpm.thermal.max_temp = range.max;
- adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
- adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
- adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
- adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
- adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
- adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
- adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
-
- return ret;
+ return 0;
}
int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
@@ -1152,7 +1116,7 @@ static uint16_t convert_to_vddc(uint8_t vid)
return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}
-static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
+int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
struct amdgpu_device *adev = smu->adev;
uint32_t vdd = 0, val_vid = 0;
@@ -1171,39 +1135,6 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
}
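
The convert_to_vddc() helper kept as context above is self-contained enough to exercise on its own. A minimal sketch, assuming the formula encodes the usual 6.25 mV-per-step VID scale below 1.55 V with the result in millivolts; the sample VID is made up:

#include <stdint.h>
#include <stdio.h>

#define SMU11_VOLTAGE_SCALE 4	/* same value as in the patch */

/* mirrors convert_to_vddc() above: 6200 and 25 appear to be in 0.25 mV
 * units, so dividing by the scale yields millivolts */
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t)((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

int main(void)
{
	/* made-up VID: 6200 - 64 * 25 = 4600; 4600 / 4 = 1150 mV */
	printf("vid 0x40 -> %u mV\n", (unsigned)convert_to_vddc(0x40));
	return 0;
}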
-int smu_v11_0_read_sensor(struct smu_context *smu,
- enum amd_pp_sensors sensor,
- void *data, uint32_t *size)
-{
- int ret = 0;
-
- if(!data || !size)
- return -EINVAL;
-
- switch (sensor) {
- case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
- *size = 4;
- break;
- case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
- *size = 4;
- break;
- case AMDGPU_PP_SENSOR_VDDGFX:
- ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
- *size = 4;
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret)
- *size = 0;
-
- return ret;
-}
-
int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request
@@ -1244,7 +1175,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
return 0;
- ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
+ ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
if(clk_select == SMU_UCLK)
smu->hard_min_uclk_req_from_dal = clk_freq;
@@ -1417,6 +1348,8 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
+ struct smu_context *smu = &adev->smu;
+ uint32_t low, high;
uint32_t val = 0;
switch (state) {
@@ -1437,9 +1370,19 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* For THM irqs */
+ low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
+ smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
+ smu->thermal_range.software_shutdown_temp);
+
val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
+ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
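
The new threshold programming above clamps the alert window before packing it into the 8-bit DIG_THERM_INTH/INTL register fields. A standalone sketch of that arithmetic, with assumed constant values and made-up range inputs standing in for the SMU headers:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the real SMU header constants */
#define SMU_THERMAL_MINIMUM_ALERT_TEMP		0	/* degrees C */
#define SMU_THERMAL_MAXIMUM_ALERT_TEMP		255	/* degrees C */
#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES	1000	/* range is in mC */

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }
static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

int main(void)
{
	uint32_t range_min_mc = 0;	/* thermal_range.min, made up */
	uint32_t shutdown_temp = 118;	/* software_shutdown_temp, made up */

	uint32_t low = max_u32(SMU_THERMAL_MINIMUM_ALERT_TEMP,
			       range_min_mc / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
	uint32_t high = min_u32(SMU_THERMAL_MAXIMUM_ALERT_TEMP, shutdown_temp);

	/* both thresholds land in 8-bit register fields, hence the & 0xff */
	printf("DIG_THERM_INTH = 0x%02x, DIG_THERM_INTL = 0x%02x\n",
	       (unsigned)(high & 0xff), (unsigned)(low & 0xff));
	return 0;
}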
@@ -1746,11 +1689,50 @@ int smu_v11_0_baco_exit(struct smu_context *smu)
return ret;
}
+int smu_v11_0_mode1_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ if (!ret)
+ msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
+
int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
int ret = 0, clk_id = 0;
uint32_t param = 0;
+ uint32_t clock_limit;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* bootup clocks are stored in 10 kHz units; convert to MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
clk_id = smu_clk_get_index(smu, clk_type);
if (clk_id < 0) {
@@ -1775,9 +1757,12 @@ failed:
return ret;
}
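
The new fallback above returns the VBIOS bootup clock when DPM is disabled for a clock type. Since the bootup values are stored in 10 kHz units, dividing by 100 yields MHz. A one-line worked example with a made-up bootup value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t bootup_uclk = 125000;	/* 10 kHz units, made up: 1.25 GHz */

	/* same conversion as the fallback above */
	printf("fallback uclk min/max = %u MHz\n", (unsigned)(bootup_uclk / 100));
	return 0;
}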
-int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1785,12 +1770,15 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
if (clk_id < 0)
return clk_id;
+ if (clk_type == SMU_GFXCLK)
+ amdgpu_gfx_off_ctrl(adev, false);
+
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
param, NULL);
if (ret)
- return ret;
+ goto out;
}
if (min > 0) {
@@ -1798,88 +1786,151 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
param, NULL);
if (ret)
- return ret;
+ goto out;
}
+out:
+ if (clk_type == SMU_GFXCLK)
+ amdgpu_gfx_off_ctrl(adev, true);
+
return ret;
}
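
Two things happen in this hunk: GFXCLK soft limits are now bracketed by amdgpu_gfx_off_ctrl() so the GFX block stays awake while its limits are programmed (the out: label re-enables gfxoff on every exit path), and each SetSoftMin/MaxByFreq message packs the clock ID into the upper 16 bits of the parameter with the MHz value in the lower 16. A standalone sketch of the packing, with a made-up clock index:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clk_id = 0;		/* hypothetical GFXCLK index */
	uint32_t max_mhz = 1850;	/* made-up soft max */

	uint32_t param = (uint32_t)((clk_id << 16) | (max_mhz & 0xffff));
	printf("SetSoftMaxByFreq param = 0x%08x\n", (unsigned)param);	/* 0x0000073a */
	return 0;
}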
-int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
+int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
- uint32_t pcie_gen = 0, pcie_width = 0;
- int ret;
+ int ret = 0, clk_id = 0;
+ uint32_t param;
- if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
- pcie_gen = 3;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- pcie_gen = 2;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- pcie_gen = 1;
- else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
- pcie_gen = 0;
-
- /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
- * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
- * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
- */
- if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
- pcie_width = 6;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
- pcie_width = 5;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
- pcie_width = 4;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
- pcie_width = 3;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
- pcie_width = 2;
- else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
- pcie_width = 1;
-
- ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
+ if (min <= 0 && max <= 0)
+ return -EINVAL;
- if (ret)
- dev_err(adev->dev, "[%s] Attempt to override pcie params failed!\n", __func__);
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
- return ret;
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+ if (max > 0) {
+ param = (uint32_t)((clk_id << 16) | (max & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (min > 0) {
+ param = (uint32_t)((clk_id << 16) | (min & 0xffff));
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ param, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
}
int smu_v11_0_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
+ struct smu_11_0_dpm_context *dpm_context =
+ smu->smu_dpm.dpm_context;
+ struct smu_11_0_dpm_table *gfx_table =
+ &dpm_context->dpm_tables.gfx_table;
+ struct smu_11_0_dpm_table *mem_table =
+ &dpm_context->dpm_tables.uclk_table;
+ struct smu_11_0_dpm_table *soc_table =
+ &dpm_context->dpm_tables.soc_table;
+ struct smu_umd_pstate_table *pstate_table =
+ &smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t sclk_min = 0, sclk_max = 0;
+ uint32_t mclk_min = 0, mclk_max = 0;
+ uint32_t socclk_min = 0, socclk_max = 0;
int ret = 0;
- uint32_t sclk_mask, mclk_mask, soc_mask;
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = smu_force_dpm_limit_value(smu, true);
+ sclk_min = sclk_max = gfx_table->max;
+ mclk_min = mclk_max = mem_table->max;
+ socclk_min = socclk_max = soc_table->max;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
- ret = smu_force_dpm_limit_value(smu, false);
+ sclk_min = sclk_max = gfx_table->min;
+ mclk_min = mclk_max = mem_table->min;
+ socclk_min = socclk_max = soc_table->min;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ sclk_min = gfx_table->min;
+ sclk_max = gfx_table->max;
+ mclk_min = mem_table->min;
+ mclk_max = mem_table->max;
+ socclk_min = soc_table->min;
+ socclk_max = soc_table->max;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = smu_unforce_dpm_levels(smu);
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ mclk_min = mclk_max = pstate_table->uclk_pstate.min;
+ break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = smu_get_profiling_clk_mask(smu, level,
- &sclk_mask,
- &mclk_mask,
- &soc_mask);
- if (ret)
- return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
+ sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
+ mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
+ socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ return 0;
default:
- break;
+ dev_err(adev->dev, "Invalid performance level %d\n", level);
+ return -EINVAL;
+ }
+
+ /*
+ * Separate MCLK and SOCCLK soft min/max settings are not allowed
+ * on Arcturus.
+ */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ mclk_min = mclk_max = 0;
+ socclk_min = socclk_max = 0;
+ }
+
+ if (sclk_min && sclk_max) {
+ ret = smu_v11_0_set_soft_freq_limited_range(smu,
+ SMU_GFXCLK,
+ sclk_min,
+ sclk_max);
+ if (ret)
+ return ret;
+ }
+
+ if (mclk_min && mclk_max) {
+ ret = smu_v11_0_set_soft_freq_limited_range(smu,
+ SMU_MCLK,
+ mclk_min,
+ mclk_max);
+ if (ret)
+ return ret;
}
+
+ if (socclk_min && socclk_max) {
+ ret = smu_v11_0_set_soft_freq_limited_range(smu,
+ SMU_SOCCLK,
+ socclk_min,
+ socclk_max);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
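
The rewritten smu_v11_0_set_performance_level() collapses every forced level into explicit (min, max) pairs per clock domain instead of going through mask-based level forcing. A standalone sketch of the mapping with made-up table values and a simplified struct layout:

#include <stdint.h>
#include <stdio.h>

struct dpm_table { uint32_t min, max; };	/* simplified layout */

int main(void)
{
	struct dpm_table gfx = { 500, 1850 };	/* MHz, made up */
	uint32_t sclk_min, sclk_max;

	/* AMD_DPM_FORCED_LEVEL_HIGH pins both ends to the table maximum */
	sclk_min = sclk_max = gfx.max;
	printf("HIGH: %u-%u MHz\n", (unsigned)sclk_min, (unsigned)sclk_max);

	/* AMD_DPM_FORCED_LEVEL_AUTO restores the full range */
	sclk_min = gfx.min;
	sclk_max = gfx.max;
	printf("AUTO: %u-%u MHz\n", (unsigned)sclk_min, (unsigned)sclk_max);
	return 0;
}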
@@ -1898,3 +1949,125 @@ int smu_v11_0_set_power_source(struct smu_context *smu,
NULL);
}
+int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint16_t level,
+ uint32_t *value)
+{
+ int ret = 0, clk_id = 0;
+ uint32_t param;
+
+ if (!value)
+ return -EINVAL;
+
+ if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ return 0;
+
+ clk_id = smu_clk_get_index(smu, clk_type);
+ if (clk_id < 0)
+ return clk_id;
+
+ param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ param,
+ value);
+ if (ret)
+ return ret;
+
+ /*
+ * BIT31: 0 - fine-grained DPM, 1 - discrete DPM.
+ * Discrete DPM is not supported for now, so mask the flag off.
+ */
+ *value = *value & 0x7fffffff;
+
+ return ret;
+}
+
+int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ return smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ 0xff,
+ value);
+}
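
Two conventions from the new helpers above are worth spelling out: passing level 0xff to GetDpmFreqByIndex asks the SMU for the level count rather than a frequency, and bit 31 of the reply flags discrete (vs. fine-grained) DPM and is masked off before use. A standalone sketch with a made-up clock index and reply value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clk_id = 2;	/* hypothetical clock index */
	uint16_t level = 0xff;	/* "return the level count" convention */
	uint32_t param = ((clk_id & 0xffff) << 16) | (level & 0xffff);

	uint32_t raw_reply = 0x80000004;	/* made up: discrete DPM, 4 levels */
	uint32_t value = raw_reply & 0x7fffffff;	/* strip the BIT31 flag */

	printf("param = 0x%08x, levels = %u\n", (unsigned)param, (unsigned)value);
	return 0;
}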
+
+int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ struct smu_11_0_dpm_table *single_dpm_table)
+{
+ int ret = 0;
+ uint32_t clk;
+ int i;
+
+ ret = smu_v11_0_get_dpm_level_count(smu,
+ clk_type,
+ &single_dpm_table->count);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
+ return ret;
+ }
+
+ for (i = 0; i < single_dpm_table->count; i++) {
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ i,
+ &clk);
+ if (ret) {
+ dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
+ return ret;
+ }
+
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+
+ if (i == 0)
+ single_dpm_table->min = clk;
+ else if (i == single_dpm_table->count - 1)
+ single_dpm_table->max = clk;
+ }
+
+ return 0;
+}
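
smu_v11_0_set_single_dpm_table() above seeds the table minimum from level 0 and the maximum from the last level while filling each entry. A standalone sketch with canned frequencies in place of SMU replies and a simplified struct layout:

#include <stdint.h>
#include <stdio.h>

struct dpm_level { uint32_t value; int enabled; };
struct dpm_table {
	uint32_t count, min, max;
	struct dpm_level dpm_levels[8];
};

int main(void)
{
	uint32_t freqs[] = { 500, 800, 1200, 1850 };	/* MHz, made up */
	struct dpm_table t = { .count = 4 };
	uint32_t i;

	for (i = 0; i < t.count; i++) {
		t.dpm_levels[i].value = freqs[i];
		t.dpm_levels[i].enabled = 1;
		if (i == 0)
			t.min = freqs[i];
		else if (i == t.count - 1)
			t.max = freqs[i];
	}
	printf("dpm table: %u levels, %u-%u MHz\n",
	       (unsigned)t.count, (unsigned)t.min, (unsigned)t.max);
	return 0;
}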
+
+int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min_value,
+ uint32_t *max_value)
+{
+ uint32_t level_count = 0;
+ int ret = 0;
+
+ if (!min_value && !max_value)
+ return -EINVAL;
+
+ if (min_value) {
+ /* by default, use the level 0 clock value as the min value */
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ 0,
+ min_value);
+ if (ret)
+ return ret;
+ }
+
+ if (max_value) {
+ ret = smu_v11_0_get_dpm_level_count(smu,
+ clk_type,
+ &level_count);
+ if (ret)
+ return ret;
+
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ level_count - 1,
+ max_value);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}