Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 384 +++++++++++++++++++++++---------
 1 file changed, 294 insertions(+), 90 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 02e6f8c4dde0..5f20cadee343 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -167,8 +167,10 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
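+ /*
+ * pm_runtime_get_sync() bumps the usage count even on failure,
+ * so the reference must be dropped before bailing out.
+ */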
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
if (adev->smu.ppt_funcs->get_current_power_state)
@@ -212,8 +214,10 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
return -EINVAL;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
mutex_lock(&adev->pm.mutex);
@@ -307,8 +311,10 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
level = smu_get_performance_level(&adev->smu);
@@ -369,8 +375,10 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
}
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
current_level = smu_get_performance_level(&adev->smu);
@@ -449,8 +457,10 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
ret = smu_get_power_num_states(&adev->smu, &data);
@@ -491,8 +501,10 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
pm = smu_get_current_power_state(smu);
@@ -567,8 +579,10 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
state = data.states[idx];
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
/* only set user selected power states */
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
@@ -608,8 +622,10 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
@@ -650,8 +666,10 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
@@ -778,8 +796,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
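+ /*
+ * strsep() returns NULL once the string is exhausted, which also
+ * avoids the NULL tmp_str dereference the old loop condition risked.
+ */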
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -790,8 +807,10 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
}
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
ret = smu_od_edit_dpm_table(&adev->smu, type,
@@ -847,8 +866,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
@@ -905,8 +926,10 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
pr_debug("featuremask = 0x%llx\n", featuremask);
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
@@ -942,8 +965,10 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
@@ -1001,8 +1026,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
@@ -1039,8 +1066,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@@ -1071,11 +1097,13 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
return ret;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
@@ -1101,8 +1129,10 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
@@ -1135,11 +1165,13 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
return ret;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
@@ -1165,8 +1197,10 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
@@ -1199,11 +1233,13 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
return ret;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
else
@@ -1231,8 +1267,10 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
@@ -1265,11 +1303,13 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
return ret;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
else
@@ -1297,8 +1337,10 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
@@ -1331,11 +1373,13 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
return ret;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
else
@@ -1363,8 +1407,10 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
@@ -1397,11 +1443,13 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
return ret;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
- ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
+ ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
else
@@ -1429,8 +1477,10 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
@@ -1462,8 +1512,10 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
return -EINVAL;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
@@ -1498,8 +1550,10 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
@@ -1531,8 +1585,10 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
return -EINVAL;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
@@ -1587,8 +1643,10 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
size = smu_get_power_profile_mode(&adev->smu, buf);
@@ -1609,7 +1667,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
const char *buf,
size_t count)
{
- int ret = 0xff;
+ int ret;
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint32_t parameter_size = 0;
@@ -1637,8 +1695,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -1650,8 +1707,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
parameter[parameter_size] = profile_mode;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev))
ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
@@ -1687,8 +1746,10 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return r;
+ }
/* read the IP busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
@@ -1723,8 +1784,10 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return r;
+ }
/* read the IP busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
@@ -1770,8 +1833,10 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
return -ENODATA;
ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
return ret;
+ }
amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
@@ -1808,9 +1873,76 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
return 0;
}
+/**
+ * DOC: thermal_throttling_logging
+ *
+ * Thermal throttling pulls down the clock frequency and thus the performance.
+ * It's a useful mechanism to protect the chip from overheating. Since it
+ * impacts performance, the user controls whether it is enabled and if so,
+ * the log frequency.
+ *
+ * Reading back the file shows you the status (enabled or disabled) and
+ * the interval (in seconds) between each thermal logging.
+ *
+ * Writing an integer to the file sets a new logging interval, in seconds.
+ * The value should be between 1 and 3600. A value less than 1 disables
+ * thermal logging; a value greater than 3600 is rejected with -EINVAL.
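+ *
+ * As a hypothetical example (the card index and PCI address depend on
+ * the system), a 60 second interval could be set and read back with:
+ *
+ * $ echo "60" > /sys/class/drm/card0/device/thermal_throttling_logging
+ * $ cat /sys/class/drm/card0/device/thermal_throttling_logging
+ * 0000:08:00.0: thermal throttling logging enabled, with interval 60 seconds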
+ */
+static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
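+ /* the setter below stores (seconds - 1) * HZ, hence the +1 on readback */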
+ return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
+ adev->ddev->unique,
+ atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+ adev->throttling_logging_rs.interval / HZ + 1);
+}
+
+static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ long throttling_logging_interval;
+ unsigned long flags;
+ int ret = 0;
+
+ ret = kstrtol(buf, 0, &throttling_logging_interval);
+ if (ret)
+ return ret;
+
+ if (throttling_logging_interval > 3600)
+ return -EINVAL;
+
+ if (throttling_logging_interval > 0) {
+ raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
+ /*
+ * Reset the ratelimit timer internals.
+ * This can effectively restart the timer.
+ */
+ adev->throttling_logging_rs.interval =
+ (throttling_logging_interval - 1) * HZ;
+ adev->throttling_logging_rs.begin = 0;
+ adev->throttling_logging_rs.printed = 0;
+ adev->throttling_logging_rs.missed = 0;
+ raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
+
+ atomic_set(&adev->throttling_logging_enabled, 1);
+ } else {
+ atomic_set(&adev->throttling_logging_enabled, 0);
+ }
+
+ return count;
+}
+
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
@@ -1830,6 +1962,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
@@ -1872,7 +2005,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (adev->flags & AMD_IS_APU)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(unique_id)) {
- if (!adev->unique_id)
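+ /* unique_id is currently provided only by Vega10, Vega20 and Arcturus */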
+ if (asic_type != CHIP_VEGA10 &&
+ asic_type != CHIP_VEGA20 &&
+ asic_type != CHIP_ARCTURUS)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_features)) {
if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
@@ -2003,8 +2138,10 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
return -EINVAL;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
switch (channel) {
case PP_TEMP_JUNCTION:
@@ -2134,8 +2271,10 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2172,8 +2311,10 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
return err;
ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
smu_set_fan_control_mode(&adev->smu, value);
@@ -2220,8 +2361,10 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
return -EPERM;
err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
if (is_support_sw_smu(adev))
pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2272,8 +2415,10 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return -EPERM;
err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
if (is_support_sw_smu(adev))
err = smu_get_fan_speed_percent(&adev->smu, &speed);
@@ -2305,8 +2450,10 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return -EPERM;
err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
if (is_support_sw_smu(adev))
err = smu_get_fan_speed_rpm(&adev->smu, &speed);
@@ -2337,8 +2484,10 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm, &size);
@@ -2365,8 +2514,10 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm, &size);
@@ -2392,8 +2543,10 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
return -EPERM;
err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
if (is_support_sw_smu(adev))
err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
@@ -2424,8 +2577,10 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
return -EPERM;
err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
if (is_support_sw_smu(adev))
pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2473,8 +2628,10 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
return -EPERM;
ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return ret;
+ }
if (is_support_sw_smu(adev)) {
pwm_mode = smu_get_fan_control_mode(&adev->smu);
@@ -2519,8 +2676,10 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
return -EINVAL;
err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
if (is_support_sw_smu(adev)) {
smu_set_fan_control_mode(&adev->smu, pwm_mode);
@@ -2551,8 +2710,10 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
@@ -2590,8 +2751,10 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
return -EINVAL;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
@@ -2626,8 +2789,10 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
/* get the average GPU power */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
@@ -2665,11 +2830,13 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, true, true);
+ smu_get_power_limit(&adev->smu, &limit, true);
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
@@ -2697,11 +2864,13 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
if (is_support_sw_smu(adev)) {
- smu_get_power_limit(&adev->smu, &limit, false, true);
+ smu_get_power_limit(&adev->smu, &limit, false);
size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
@@ -2740,8 +2909,10 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
err = pm_runtime_get_sync(adev->ddev->dev);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return err;
+ }
if (is_support_sw_smu(adev))
err = smu_set_power_limit(&adev->smu, value);
@@ -2771,8 +2942,10 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
/* get the sclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
@@ -2806,8 +2979,10 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
return -EPERM;
r = pm_runtime_get_sync(adev->ddev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev->ddev->dev);
return r;
+ }
/* get the mclk */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
@@ -3380,21 +3555,34 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- if (ret)
- DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
-
- /* enable/disable Low Memory PState for UVD (4k videos) */
- if (adev->asic_type == CHIP_STONEY &&
- adev->uvd.decode_image_width >= WIDTH_4K) {
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ if (adev->family == AMDGPU_FAMILY_SI) {
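+ /* SI handles UVD power management through the legacy dpm state machine */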
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+ } else {
+ adev->pm.dpm.uvd_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
- if (hwmgr && hwmgr->hwmgr_func &&
- hwmgr->hwmgr_func->update_nbdpm_pstate)
- hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
- !enable,
- true);
+ amdgpu_pm_compute_clocks(adev);
+ } else {
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K) {
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->hwmgr_func &&
+ hwmgr->hwmgr_func->update_nbdpm_pstate)
+ hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+ !enable,
+ true);
+ }
}
}
@@ -3402,10 +3590,24 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- if (ret)
- DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ if (adev->family == AMDGPU_FAMILY_SI) {
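+ /* likewise, SI routes VCE power management through the legacy dpm path */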
+ mutex_lock(&adev->pm.mutex);
+ if (enable) {
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+ } else {
+ adev->pm.dpm.vce_active = false;
+ }
+ mutex_unlock(&adev->pm.mutex);
+
+ amdgpu_pm_compute_clocks(adev);
+ } else {
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+ }
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
@@ -3669,8 +3871,10 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
return -EPERM;
r = pm_runtime_get_sync(dev->dev);
- if (r < 0)
+ if (r < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
return r;
+ }
amdgpu_device_ip_get_clockgating_state(adev, &flags);
seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);