Diffstat (limited to 'drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c')
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c  157
1 file changed, 136 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 384c37875cd0..3be8eb21fd6e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include "hwmgr.h"
@@ -356,6 +357,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = hwmgr->backend;
int i;
uint32_t sub_vendor_id, hw_revision;
+ uint32_t top32, bottom32;
struct amdgpu_device *adev = hwmgr->adev;
vega10_initialize_power_tune_defaults(hwmgr);
@@ -499,6 +501,14 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
(hw_revision == 0) &&
(sub_vendor_id != 0x1002))
data->smu_features[GNLD_PCC_LIMIT].supported = true;
+
+ /* Get the SN to turn into a Unique ID */
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
+ top32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
+ bottom32 = smum_get_argument(hwmgr);
+
+ adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
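The two SMC reads above return the ASIC serial number in two 32-bit halves. A minimal sketch of how they are folded into the 64-bit unique_id (the helper name is hypothetical; the patch does this inline in vega10_init_dpm_defaults()):

	/* Sketch only: fold the two 32-bit serial-number reads into the
	 * 64-bit unique_id, exactly as the inline code above does. */
	static uint64_t vega10_serial_to_unique_id(uint32_t top32, uint32_t bottom32)
	{
		/* bottom32 lands in the high dword, top32 in the low dword */
		return ((uint64_t)bottom32 << 32) | top32;
	}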
#ifdef PPLIB_VEGA10_EVV_SUPPORT
@@ -2267,8 +2277,8 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
- pp_table->AcgAvfsGb.m1_shift = 0;
- pp_table->AcgAvfsGb.m2_shift = 0;
+ pp_table->AcgAvfsGb.m1_shift = 24;
+ pp_table->AcgAvfsGb.m2_shift = 12;
pp_table->AcgAvfsGb.b_shift = 0;
} else {
@@ -2364,6 +2374,10 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
struct vega10_hwmgr *data = hwmgr->backend;
if (data->smu_features[GNLD_AVFS].supported) {
+ /* Already enabled or disabled */
+ if (!(enable ^ data->smu_features[GNLD_AVFS].enabled))
+ return 0;
+
if (enable) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true,
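The guard added at the top of vega10_avfs_enable() is an early return that skips redundant SMC requests: for two boolean flags, !(enable ^ enabled) is true exactly when the requested state already matches the current one. A minimal equivalent sketch (hypothetical helper, not in the patch):

	/* Sketch: same truth table as the XOR test used above. */
	static bool avfs_state_unchanged(bool enable, bool currently_enabled)
	{
		return enable == currently_enabled;
	}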
@@ -2466,11 +2480,6 @@ static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
return;
}
}
-
- if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
- data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
- }
}
/**
@@ -3683,6 +3692,10 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
vega10_update_avfs(hwmgr);
+ /*
+ * Clear all OD flags except DPMTABLE_OD_UPDATE_VDDC.
+ * That will help to keep AVFS disabled.
+ */
data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
return 0;
@@ -3785,6 +3798,18 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
+ *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MEM_TEMP:
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
+ *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_UVD_POWER:
*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
*size = 4;
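Both new sensor cases follow the same query pattern: send the PPSMC request, read the SMU's reply from the argument register, then scale by PP_TEMPERATURE_UNITS_PER_CENTIGRADES. A sketch of that pattern as a helper (hypothetical; the patch open-codes it per case):

	/* Sketch: generic SMU temperature query used by the hotspot and
	 * HBM sensor cases above. */
	static uint32_t vega10_query_temperature(struct pp_hwmgr *hwmgr, uint32_t msg)
	{
		smum_send_msg_to_smc(hwmgr, msg);
		return smum_get_argument(hwmgr) *
				PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	}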
@@ -4852,12 +4877,22 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *thermal_data)
{
- struct phm_ppt_v2_information *table_info =
- (struct phm_ppt_v2_information *)hwmgr->pptable;
+ struct vega10_hwmgr *data = hwmgr->backend;
+ PPTable_t *pp_table = &(data->smc_state_table.pp_table);
memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
- thermal_data->max = table_info->tdp_table->usSoftwareShutdownTemp *
+ thermal_data->max = pp_table->TedgeLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->mem_crit_max = pp_table->ThbmLimit *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM)*
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
return 0;
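The rewritten range function reads the limits straight from the SMC PPTable instead of the soft pptable, and derives each emergency threshold by adding the corresponding CTF offset. A sketch of that per-sensor pattern (hypothetical helper; the patch open-codes it for edge, hotspot and HBM):

	/* Sketch: the critical limit comes from the PPTable; the emergency
	 * limit is the same value plus the CTF margin, both expressed in
	 * driver temperature units. */
	static void vega10_fill_temp_range(int limit, int ctf_offset,
					   int *crit_max, int *emergency_max)
	{
		*crit_max      = limit * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		*emergency_max = (limit + ctf_offset) *
					PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	}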
@@ -4988,13 +5023,70 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
return true;
}
+static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
+{
+ struct vega10_hwmgr *data = hwmgr->backend;
+ struct pp_power_state *ps = hwmgr->request_ps;
+ struct vega10_power_state *vega10_ps;
+ struct vega10_single_dpm_table *gfx_dpm_table =
+ &data->dpm_table.gfx_table;
+ struct vega10_single_dpm_table *soc_dpm_table =
+ &data->dpm_table.soc_table;
+ struct vega10_single_dpm_table *mem_dpm_table =
+ &data->dpm_table.mem_table;
+ int max_level;
+
+ if (!ps)
+ return;
+
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+ max_level = vega10_ps->performance_level_count - 1;
+
+ if (vega10_ps->performance_levels[max_level].gfx_clock !=
+ gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].gfx_clock =
+ gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
+
+ if (vega10_ps->performance_levels[max_level].soc_clock !=
+ soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].soc_clock =
+ soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
+
+ if (vega10_ps->performance_levels[max_level].mem_clock !=
+ mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].mem_clock =
+ mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
+
+ if (!hwmgr->ps)
+ return;
+
+ ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
+ vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+ max_level = vega10_ps->performance_level_count - 1;
+
+ if (vega10_ps->performance_levels[max_level].gfx_clock !=
+ gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].gfx_clock =
+ gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
+
+ if (vega10_ps->performance_levels[max_level].soc_clock !=
+ soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].soc_clock =
+ soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
+
+ if (vega10_ps->performance_levels[max_level].mem_clock !=
+ mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
+ vega10_ps->performance_levels[max_level].mem_clock =
+ mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
+}
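Note how the second half of vega10_odn_update_power_state() reaches the last saved power state: hwmgr->ps is a packed array whose entries are hwmgr->ps_size bytes apart, so it is indexed with byte arithmetic rather than plain array indexing. A sketch of that access (hypothetical accessor, not in the patch):

	/* Sketch: return the i-th entry of the packed power-state array,
	 * mirroring the cast used above for the last state. */
	static struct pp_power_state *vega10_get_ps(struct pp_hwmgr *hwmgr,
						    unsigned int i)
	{
		return (struct pp_power_state *)
			((unsigned long)hwmgr->ps + hwmgr->ps_size * i);
	}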
+
static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type)
{
struct vega10_hwmgr *data = hwmgr->backend;
struct phm_ppt_v2_information *table_info = hwmgr->pptable;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk;
- struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table;
+ struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.mem_table;
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk =
&data->odn_dpm_table.vdd_dep_on_socclk;
@@ -5018,7 +5110,8 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
break;
}
if (j == od_vddc_lookup_table->count) {
- od_vddc_lookup_table->entries[j-1].us_vdd =
+ j = od_vddc_lookup_table->count - 1;
+ od_vddc_lookup_table->entries[j].us_vdd =
podn_vdd_dep->entries[i].vddc;
data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
}
@@ -5026,25 +5119,38 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
}
dpm_table = &data->dpm_table.soc_table;
for (i = 0; i < dep_table->count; i++) {
- if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd &&
- dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) {
+ if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[podn_vdd_dep->count-1].vddInd &&
+ dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count-1].clk) {
data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
- podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
- dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
+ for (; (i < dep_table->count) &&
+ (dep_table->entries[i].clk < podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk); i++) {
+ podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[podn_vdd_dep->count-1].clk;
+ dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk;
+ }
+ break;
+ } else {
+ dpm_table->dpm_levels[i].value = dep_table->entries[i].clk;
+ podn_vdd_dep_on_socclk->entries[i].vddc = dep_table->entries[i].vddc;
+ podn_vdd_dep_on_socclk->entries[i].vddInd = dep_table->entries[i].vddInd;
+ podn_vdd_dep_on_socclk->entries[i].clk = dep_table->entries[i].clk;
}
}
if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk <
- podn_vdd_dep->entries[dep_table->count-1].clk) {
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk) {
data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
- podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk;
- dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk;
+ podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk =
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
+ dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value =
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].clk;
}
if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd <
- podn_vdd_dep->entries[dep_table->count-1].vddInd) {
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd) {
data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK;
- podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd;
+ podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd =
+ podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd;
}
}
+ vega10_odn_update_power_state(hwmgr);
}
static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
@@ -5079,6 +5185,11 @@ static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
vega10_odn_initial_default_setting(hwmgr);
+ vega10_odn_update_power_state(hwmgr);
+ /* force to update all clock tables */
+ data->need_update_dpm_table = DPMTABLE_UPDATE_SCLK |
+ DPMTABLE_UPDATE_MCLK |
+ DPMTABLE_UPDATE_SOCCLK;
return 0;
} else if (PP_OD_COMMIT_DPM_TABLE == type) {
vega10_check_dpm_table_updated(hwmgr);
@@ -5201,8 +5312,12 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
+
hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
hwmgr->pptable_func = &vega10_pptable_funcs;
+ if (amdgpu_passthrough(adev))
+ return vega10_baco_set_cap(hwmgr);
return 0;
}