author	Dave Airlie <airlied@redhat.com>	2016-09-28 11:27:05 +1000
committer	Dave Airlie <airlied@redhat.com>	2016-09-28 11:27:05 +1000
commit	9f4ef05bcdcfdf911b056b471dd3c6a4f331b644 (patch)
tree	ba8dfba87b4fe5295598f5438881822b6d3395f0
parent	81c5d6aa3983662b6b48b504fe3a0a4c640f6a84 (diff)
parent	beb86f29c9c7f2d04f9a42c4c61cc469c3689779 (diff)
Merge branch 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux into drm-next
Last set of radeon and amdgpu changes for 4.9. This is mostly just the
powerplay cleanup for dGPUs. Beyond that, just misc code cleanups and bug
fixes.

* 'drm-next-4.9' of git://people.freedesktop.org/~agd5f/linux: (49 commits)
  drm/amd/amdgpu: Clean up afmt allocation in DCEv6. (v2)
  drm/amd/amdgpu: Remove division from vblank_wait
  drm/radeon/atif: Send a hotplug event when we get dgpu display request
  drm/radeon/atpx: check for ATIF dGPU wake for display events support
  drm/amdgpu/atif: Send a hotplug event when we get dgpu display request
  drm/amdgpu/atpx: check for ATIF dGPU wake for display events support
  drm/amdgpu: bump version for new vce packet support
  drm/amdgpu/vce: allow the clock table packet
  drm/amdgpu:cleanup virt related define
  drm/amdgpu: use powerplay module for dgpu in Vi.
  drm/amdgpu: set gfx clock gating for tonga/polaris.
  drm/amdgpu: set system clock gating for tonga/polaris.
  drm/amd/powerplay: export function to help to set cg by smu.
  drm/amdgpu: avoid out of bounds access on array interrupt_status_offsets
  drm/amdgpu: mark symbols static where possible
  drm/amdgpu: remove unused functions
  drm/amd/powerplay: Replace per-asic print_performance with generic
  drm/radeon: narrow asic_init for virtualization
  drm/amdgpu:add fw version entry to info
  drm/amdgpu:determine if vPost is needed indeed
  ...
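One of the changes merged here ("Replace per-asic print_performance with generic") introduces a generic read_sensor powerplay callback and exposes it through a new debugfs file registered as "amdgpu_sensors" (see the amdgpu_device.c hunk below). Its read handler only accepts 4-byte reads at a 4-byte-aligned offset, where the offset encodes the sensor index. The following userspace sketch is illustrative only: the debugfs path and the sensor index are assumptions, not something defined by this merge.

/*
 * Hypothetical sketch (not part of this merge): read one value from the new
 * "amdgpu_sensors" debugfs file.  The path and sensor index are assumptions;
 * the 4-byte read at offset idx*4 mirrors what amdgpu_debugfs_sensor_read()
 * accepts (any other size or misaligned offset returns -EINVAL).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int idx = 0;                 /* assumed sensor index (AMDGPU_PP_SENSOR_* order) */
	int32_t value;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_sensors", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &value, sizeof(value), (off_t)idx * 4) == (ssize_t)sizeof(value))
		printf("sensor %d = %d\n", idx, value);
	close(fd);
	return 0;
}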
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c  11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c  9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  118
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  62
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c  13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h)  45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h  36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_smc.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c  32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_dpm.c  186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/fiji_smc.c  863
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c  131
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c  160
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  245
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_dpm.c  200
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_smc.c  677
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c  14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_dpm.c  186
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_smc.c  862
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  91
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h  37
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/si/sid.h  35
-rwxr-xr-x[-rw-r--r--]  drivers/gpu/drm/amd/include/cgs_common.h  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c  120
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/psm.c  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile  16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c  175
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c  121
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h  105
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c  5600
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h  350
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c  610
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h  81
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h  62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c  126
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c  119
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h  38
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h  41
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c  5684
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.h  424
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.c  490
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/iceland_powertune.h  74
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.c  595
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/iceland_thermal.h  58
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_dyn_defaults.h  62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c  5289
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c  716
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.h  62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c)  160
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.h)  25
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h  55
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  4359
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h)  241
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c)  985
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h)  43
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.c)  258
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h  58
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.c  350
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_dyn_defaults.h  107
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c  6370
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h  402
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.c  495
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.c  590
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_thermal.h  61
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h  19
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu7_common.h (renamed from drivers/gpu/drm/amd/amdgpu/iceland_smum.h)  43
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h  412
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h  70
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile  5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c  2374
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h (renamed from drivers/gpu/drm/amd/amdgpu/fiji_smum.h)  41
-rwxr-xr-x[-rw-r--r--]  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c  612
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h  32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c  2576
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_clockpowergating.h)  26
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c  613
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h  63
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c  2287
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h (renamed from drivers/gpu/drm/amd/amdgpu/tonga_smum.h)  32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c  703
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h  41
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c  589
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h  87
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c  101
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c  3207
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/tonga_powertune.h)  56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c  672
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h  46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c  17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c  5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  11
106 files changed, 18603 insertions, 35882 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index dc6df075bafc..d15e9b080ce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -52,10 +52,7 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_dpm.o \
amdgpu_powerplay.o \
- cz_smc.o cz_dpm.o \
- tonga_smc.o tonga_dpm.o \
- fiji_smc.o fiji_dpm.o \
- iceland_smc.o iceland_dpm.o
+ cz_smc.o cz_dpm.o
# add DCE block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ee45d9f7f3dc..9d79e4ba0213 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -57,6 +57,7 @@
#include "amdgpu_acp.h"
#include "gpu_scheduler.h"
+#include "amdgpu_virt.h"
/*
* Modules parameters.
@@ -1827,6 +1828,7 @@ struct amdgpu_asic_funcs {
bool (*read_disabled_bios)(struct amdgpu_device *adev);
bool (*read_bios_from_rom)(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes);
+ void (*detect_hw_virtualization) (struct amdgpu_device *adev);
int (*read_register)(struct amdgpu_device *adev, u32 se_num,
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -1836,8 +1838,6 @@ struct amdgpu_asic_funcs {
/* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
- /* query virtual capabilities */
- u32 (*get_virtual_caps)(struct amdgpu_device *adev);
/* static power management */
int (*get_pcie_lanes)(struct amdgpu_device *adev);
void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
@@ -1933,16 +1933,6 @@ struct amdgpu_atcs {
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
-
-/* GPU virtualization */
-#define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0)
-#define AMDGPU_VIRT_CAPS_IS_VF (1 << 1)
-struct amdgpu_virtualization {
- bool supports_sr_iov;
- bool is_virtual;
- u32 caps;
-};
-
/*
* Core structure, functions and helpers.
*/
@@ -2260,12 +2250,12 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
+#define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
@@ -2323,6 +2313,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
+#define amdgpu_dpm_read_sensor(adev, idx, value) \
+ ((adev)->pp_enabled ? \
+ (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value)) : \
+ -EINVAL)
+
#define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
@@ -2374,11 +2369,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
(adev)->pm.funcs->powergate_vce((adev), (g)))
-#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
-
#define amdgpu_dpm_get_current_power_state(adev) \
(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
@@ -2460,11 +2450,13 @@ void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
+bool amdgpu_atpx_dgpu_req_power_for_displays(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
+static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
#endif
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 5cd7b736a9de..5796539a0bcb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -25,6 +25,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
+#include <linux/pm_runtime.h>
#include <acpi/video.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -333,6 +334,16 @@ int amdgpu_atif_handler(struct amdgpu_device *adev,
#endif
}
}
+ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+ if ((adev->flags & AMD_IS_PX) &&
+ amdgpu_atpx_dgpu_req_power_for_displays()) {
+ pm_runtime_get_sync(adev->ddev->dev);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(adev->ddev);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ }
+ }
/* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d080d0807a5b..dba8a5b25e66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -143,14 +143,6 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
return r;
}
-u32 pool_to_domain(enum kgd_memory_pool p)
-{
- switch (p) {
- case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
- default: return AMDGPU_GEM_DOMAIN_GTT;
- }
-}
-
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 49de92600074..550c5ee704ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -29,6 +29,7 @@ struct amdgpu_atpx {
acpi_handle handle;
struct amdgpu_atpx_functions functions;
bool is_hybrid;
+ bool dgpu_req_power_for_displays;
};
static struct amdgpu_atpx_priv {
@@ -73,6 +74,10 @@ bool amdgpu_is_atpx_hybrid(void) {
return amdgpu_atpx_priv.atpx.is_hybrid;
}
+bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
+ return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
+}
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -213,6 +218,10 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = true;
}
+ atpx->dgpu_req_power_for_displays = false;
+ if (valid_bits & ATPX_DGPU_REQ_POWER_FOR_DISPLAYS)
+ atpx->dgpu_req_power_for_displays = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f1c53a2b09c6..7a8bfa34682f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -711,6 +711,47 @@ static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode
return -EINVAL;
}
+static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
+ enum cgs_ucode_id type)
+{
+ CGS_FUNC_ADEV;
+ uint16_t fw_version;
+
+ switch (type) {
+ case CGS_UCODE_ID_SDMA0:
+ fw_version = adev->sdma.instance[0].fw_version;
+ break;
+ case CGS_UCODE_ID_SDMA1:
+ fw_version = adev->sdma.instance[1].fw_version;
+ break;
+ case CGS_UCODE_ID_CP_CE:
+ fw_version = adev->gfx.ce_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_PFP:
+ fw_version = adev->gfx.pfp_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_ME:
+ fw_version = adev->gfx.me_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT1:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT2:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_RLC_G:
+ fw_version = adev->gfx.rlc_fw_version;
+ break;
+ default:
+ DRM_ERROR("firmware type %d do not have version\n", type);
+ fw_version = 0;
+ }
+ return fw_version;
+}
+
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
@@ -741,6 +782,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->mc_addr = gpu_addr;
info->image_size = data_size;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
+ info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
} else {
char fw_name[30] = {0};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 319a5e1d9389..decbba5ad438 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1545,7 +1545,8 @@ static int amdgpu_connector_virtual_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-int amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
+static int
+amdgpu_connector_virtual_dpms(struct drm_connector *connector, int mode)
{
return 0;
}
@@ -1557,7 +1558,8 @@ amdgpu_connector_virtual_detect(struct drm_connector *connector, bool force)
return connector_status_connected;
}
-int amdgpu_connector_virtual_set_property(struct drm_connector *connector,
+static int
+amdgpu_connector_virtual_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3ddae5ff41bb..99a15cad6789 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -50,6 +50,7 @@
#include "vi.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
+#include <linux/firmware.h>
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
@@ -110,7 +111,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
bool always_indirect)
{
trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
+
if ((reg * 4) < adev->rmmio_size && !always_indirect)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
else {
@@ -651,6 +652,46 @@ bool amdgpu_card_posted(struct amdgpu_device *adev)
}
+static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ if (amdgpu_passthrough(adev)) {
+ /* for FIJI: In whole GPU pass-through virtualization case
+ * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH)
+ * so amdgpu_card_posted return false and driver will incorrectly skip vPost.
+ * but if we force vPost do in pass-through case, the driver reload will hang.
+ * whether doing vPost depends on amdgpu_card_posted if smc version is above
+ * 00160e00 for FIJI.
+ */
+ if (adev->asic_type == CHIP_FIJI) {
+ int err;
+ uint32_t fw_ver;
+ err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
+ /* force vPost if error occurred */
+ if (err)
+ return true;
+
+ fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+ if (fw_ver >= 0x00160e00)
+ return !amdgpu_card_posted(adev);
+ }
+ } else {
+ /* in bare-metal case, amdgpu_card_posted return false
+ * after system reboot/boot, and return true if driver
+ * reloaded.
+ * we shouldn't do vPost after driver reload otherwise GPU
+ * could hang.
+ */
+ if (amdgpu_card_posted(adev))
+ return false;
+ }
+
+ /* we assume vPost is needed for all other cases */
+ return true;
+}
+
/**
* amdgpu_dummy_page_init - init dummy page used by the driver
*
@@ -1485,13 +1526,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_device_is_virtual(void)
+static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
-#ifdef CONFIG_X86
- return boot_cpu_has(X86_FEATURE_HYPERVISOR);
-#else
- return false;
-#endif
+ if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}
/**
@@ -1648,25 +1686,24 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
- /* See if the asic supports SR-IOV */
- adev->virtualization.supports_sr_iov =
- amdgpu_atombios_has_gpu_virtualization_table(adev);
-
- /* Check if we are executing in a virtualized environment */
- adev->virtualization.is_virtual = amdgpu_device_is_virtual();
- adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+ /* detect if we are with an SRIOV vbios */
+ amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
- if (!amdgpu_card_posted(adev) ||
- (adev->virtualization.is_virtual &&
- !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
+ if (amdgpu_vpost_needed(adev)) {
if (!adev->bios) {
- dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
+ dev_err(adev->dev, "no vBIOS found\n");
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU not posted. posting now...\n");
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ DRM_INFO("GPU posting now...\n");
+ r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (r) {
+ dev_err(adev->dev, "gpu post error!\n");
+ goto failed;
+ }
+ } else {
+ DRM_INFO("GPU post is not needed\n");
}
/* Initialize clocks */
@@ -1842,8 +1879,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
adev = dev->dev_private;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
drm_kms_helper_poll_disable(dev);
@@ -1928,8 +1964,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
struct drm_crtc *crtc;
int r;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
- dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (fbcon)
@@ -2043,7 +2078,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
return asic_hang;
}
-int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2714,7 +2749,7 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
- config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+ config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
@@ -2773,6 +2808,29 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
return result;
}
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int idx, r;
+ int32_t value;
+
+ if (size != 4 || *pos & 0x3)
+ return -EINVAL;
+
+ /* convert offset to sensor number */
+ idx = *pos >> 2;
+
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &value);
+ else
+ return -EINVAL;
+
+ if (!r)
+ r = put_user(value, (int32_t *)buf);
+
+ return !r ? 4 : r;
+}
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
@@ -2805,12 +2863,19 @@ static const struct file_operations amdgpu_debugfs_gca_config_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_sensor_read,
+ .llseek = default_llseek
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
&amdgpu_debugfs_gca_config_fops,
+ &amdgpu_debugfs_sensors_fops,
};
static const char *debugfs_regs_names[] = {
@@ -2819,6 +2884,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
"amdgpu_gca_config",
+ "amdgpu_sensors",
};
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 596362624610..7dbc7727e32b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -56,9 +56,10 @@
* - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
* - 3.5.0 - Add support for new UVD_NO_OP register.
* - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
+ * - 3.7.0 - Add support for VCE clock list packet
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 6
+#define KMS_DRIVER_MINOR 7
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -485,7 +486,7 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown
*/
- if (adev->virtualization.is_virtual)
+ if (amdgpu_passthrough(adev))
amdgpu_pci_remove(pdev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d4ec3cb187a5..accc908bdc88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1322,6 +1322,64 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
+{
+ int32_t value;
+
+ /* sanity check PP is enabled */
+ if (!(adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->read_sensor))
+ return -EINVAL;
+
+ /* GPU Clocks */
+ seq_printf(m, "GFX Clocks and Power:\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, &value))
+ seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, &value))
+ seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, &value))
+ seq_printf(m, "\t%u mV (VDDGFX)\n", value);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, &value))
+ seq_printf(m, "\t%u mV (VDDNB)\n", value);
+ seq_printf(m, "\n");
+
+ /* GPU Temp */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, &value))
+ seq_printf(m, "GPU Temperature: %u C\n", value/1000);
+
+ /* GPU Load */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value))
+ seq_printf(m, "GPU Load: %u %%\n", value);
+ seq_printf(m, "\n");
+
+ /* UVD clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, &value)) {
+ if (!value) {
+ seq_printf(m, "UVD: Disabled\n");
+ } else {
+ seq_printf(m, "UVD: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, &value))
+ seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, &value))
+ seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ }
+ }
+ seq_printf(m, "\n");
+
+ /* VCE clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, &value)) {
+ if (!value) {
+ seq_printf(m, "VCE: Disabled\n");
+ } else {
+ seq_printf(m, "VCE: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, &value))
+ seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+ }
+ }
+
+ return 0;
+}
+
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1337,11 +1395,11 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
} else if (adev->pp_enabled) {
- amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+ return amdgpu_debugfs_pm_info_pp(m, adev);
} else {
mutex_lock(&adev->pm.mutex);
if (adev->pm.funcs->debugfs_print_current_performance_level)
- amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
+ adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 1e7f160f23d8..68ad24101a36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -80,15 +80,6 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
amd_pp->ip_funcs = &kv_dpm_ip_funcs;
break;
#endif
- case CHIP_TOPAZ:
- amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
- break;
- case CHIP_TONGA:
- amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
- break;
- case CHIP_FIJI:
- amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
- break;
case CHIP_CARRIZO:
case CHIP_STONEY:
amd_pp->ip_funcs = &cz_dpm_ip_funcs;
@@ -110,11 +101,11 @@ static int amdgpu_pp_early_init(void *handle)
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
- adev->pp_enabled = true;
- break;
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_TOPAZ:
+ adev->pp_enabled = true;
+ break;
case CHIP_CARRIZO:
case CHIP_STONEY:
adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 2c9ea9b50f48..06b94c13c2c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -691,6 +691,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
case 0x04000008: /* rdo */
case 0x04000009: /* vui */
case 0x05000002: /* auxiliary buffer */
+ case 0x05000009: /* clock table */
break;
case 0x03000001: /* encode */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 88d68cb6e89d..2c37a374917f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -19,22 +19,39 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Author: Monk.liu@amd.com
*/
+#ifndef AMDGPU_VIRT_H
+#define AMDGPU_VIRT_H
-#ifndef _POLARIS10_CLOCK_POWER_GATING_H_
-#define _POLARIS10_CLOCK_POWER_GATING_H_
+#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
+#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
+#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
+#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is pass through for VM */
+/* GPU virtualization */
+struct amdgpu_virtualization {
+ uint32_t virtual_caps;
+};
-#include "polaris10_hwmgr.h"
-#include "pp_asicblocks.h"
+#define amdgpu_sriov_enabled(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
-int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-int polaris10_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
-int polaris10_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-int polaris10_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
- const uint32_t *msg_id);
-int polaris10_phm_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+#define amdgpu_sriov_vf(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_IS_VF)
-#endif /* _POLARIS10_CLOCK_POWER_GATING_H_ */
+#define amdgpu_sriov_bios(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS)
+
+#define amdgpu_passthrough(adev) \
+((adev)->virtualization.virtual_caps & AMDGPU_PASSTHROUGH_MODE)
+
+static inline bool is_virtual_machine(void)
+{
+#ifdef CONFIG_X86
+ return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+ return false;
+#endif
+}
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 825de800b798..a845b6a93b79 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -963,12 +963,6 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
-{
- /* CIK does not support SR-IOV */
- return 0;
-}
-
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false},
@@ -1641,6 +1635,12 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
+static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
{
/* ORDER MATTERS! */
@@ -2384,13 +2384,13 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
{
.read_disabled_bios = &cik_read_disabled_bios,
.read_bios_from_rom = &cik_read_bios_from_rom,
+ .detect_hw_virtualization = cik_detect_hw_virtualization,
.read_register = &cik_read_register,
.reset = &cik_asic_reset,
.set_vga_state = &cik_vga_set_state,
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
- .get_virtual_caps = &cik_get_virtual_caps,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index c4f6f00d62bc..8659852aea9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -562,4 +562,40 @@ enum {
MTYPE_NONCACHED = 3
};
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x) ((x) << 0)
+#define RB_MAP_PKR0_MASK (0x3 << 0)
+#define RB_MAP_PKR1(x) ((x) << 2)
+#define RB_MAP_PKR1_MASK (0x3 << 2)
+#define RB_XSEL2(x) ((x) << 4)
+#define RB_XSEL2_MASK (0x3 << 4)
+#define RB_XSEL (1 << 6)
+#define RB_YSEL (1 << 7)
+#define PKR_MAP(x) ((x) << 8)
+#define PKR_MAP_MASK (0x3 << 8)
+#define PKR_XSEL(x) ((x) << 10)
+#define PKR_XSEL_MASK (0x3 << 10)
+#define PKR_YSEL(x) ((x) << 12)
+#define PKR_YSEL_MASK (0x3 << 12)
+#define SC_MAP(x) ((x) << 16)
+#define SC_MAP_MASK (0x3 << 16)
+#define SC_XSEL(x) ((x) << 18)
+#define SC_XSEL_MASK (0x3 << 18)
+#define SC_YSEL(x) ((x) << 20)
+#define SC_YSEL_MASK (0x3 << 20)
+#define SE_MAP(x) ((x) << 24)
+#define SE_MAP_MASK (0x3 << 24)
+#define SE_XSEL(x) ((x) << 26)
+#define SE_XSEL_MASK (0x3 << 26)
+#define SE_YSEL(x) ((x) << 28)
+#define SE_YSEL_MASK (0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x) ((x) << 0)
+#define SE_PAIR_MAP_MASK (0x3 << 0)
+#define SE_PAIR_XSEL(x) ((x) << 2)
+#define SE_PAIR_XSEL_MASK (0x3 << 2)
+#define SE_PAIR_YSEL(x) ((x) << 4)
+#define SE_PAIR_YSEL_MASK (0x3 << 4)
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index 95887e484c51..aed7033c0973 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -101,13 +101,6 @@ int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
return 0;
}
-int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev,
- u16 msg, u32 parameter)
-{
- WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
- return cz_send_msg_to_smc_async(adev, msg);
-}
-
int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
u16 msg, u32 parameter)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index bc5bb4eb9625..9d38fe0519e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -221,7 +221,7 @@ static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
*/
static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
- unsigned i = 0;
+ unsigned i = 100;
if (crtc >= adev->mode_info.num_crtc)
return;
@@ -233,14 +233,16 @@ static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
* wait for another frame.
*/
while (dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v10_0_is_counter_moving(adev, crtc))
break;
}
}
while (!dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v10_0_is_counter_moving(adev, crtc))
break;
}
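The hunk above (and the matching DCE6/DCE8 hunks below) implements "Remove division from vblank_wait": instead of testing i++ % 100 == 0 on every spin, the counter is primed to 100 and reset to 0 after each stall check, so the busy-wait avoids a modulo per iteration while still checking the frame counter on the very first pass. A self-contained sketch of the resulting polling pattern, with stand-in predicates (not the real driver helpers), is:

/*
 * Illustrative sketch of the reworked vblank_wait polling loop.
 * in_vblank() and counter_moving() are stand-ins for the real
 * dce_vX_0_is_in_vblank()/dce_vX_0_is_counter_moving() helpers;
 * here they just simulate hardware so the example runs.
 */
#include <stdbool.h>
#include <stdio.h>

static int polls;
static bool in_vblank(void)      { return polls++ < 250; } /* fake: vblank ends after ~250 polls */
static bool counter_moving(void) { return true; }          /* fake: display never stalls */

int main(void)
{
	unsigned i = 100;	/* primed so the stall check runs on the first pass */

	while (in_vblank()) {
		if (i++ == 100) {	/* roughly every 100 spins... */
			i = 0;
			if (!counter_moving())
				break;	/* ...bail out if the display is stalled */
		}
	}
	printf("left vblank after %d polls\n", polls);
	return 0;
}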
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index d3512f381e53..eb8f96a61491 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -146,7 +146,7 @@ static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
*/
static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
- unsigned i = 0;
+ unsigned i = 100;
if (crtc >= adev->mode_info.num_crtc)
return;
@@ -158,14 +158,16 @@ static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
* wait for another frame.
*/
while (dce_v6_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v6_0_is_counter_moving(adev, crtc))
break;
}
}
while (!dce_v6_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v6_0_is_counter_moving(adev, crtc))
break;
}
@@ -185,7 +187,7 @@ static void dce_v6_0_pageflip_interrupt_init(struct amdgpu_device *adev)
unsigned i;
/* Enable pflip interrupts */
- for (i = 0; i <= adev->mode_info.num_crtc; i++)
+ for (i = 0; i < adev->mode_info.num_crtc; i++)
amdgpu_irq_get(adev, &adev->pageflip_irq, i);
}
@@ -194,7 +196,7 @@ static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
unsigned i;
/* Disable pflip interrupts */
- for (i = 0; i <= adev->mode_info.num_crtc; i++)
+ for (i = 0; i < adev->mode_info.num_crtc; i++)
amdgpu_irq_put(adev, &adev->pageflip_irq, i);
}
@@ -1420,21 +1422,29 @@ static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
}
-static void dce_v6_0_afmt_init(struct amdgpu_device *adev)
+static int dce_v6_0_afmt_init(struct amdgpu_device *adev)
{
- int i;
+ int i, j;
for (i = 0; i < adev->mode_info.num_dig; i++)
adev->mode_info.afmt[i] = NULL;
- /* DCE8 has audio blocks tied to DIG encoders */
+ /* DCE6 has audio blocks tied to DIG encoders */
for (i = 0; i < adev->mode_info.num_dig; i++) {
adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
if (adev->mode_info.afmt[i]) {
adev->mode_info.afmt[i]->offset = dig_offsets[i];
adev->mode_info.afmt[i]->id = i;
+ } else {
+ for (j = 0; j < i; j++) {
+ kfree(adev->mode_info.afmt[j]);
+ adev->mode_info.afmt[j] = NULL;
+ }
+ DRM_ERROR("Out of memory allocating afmt table\n");
+ return -ENOMEM;
}
}
+ return 0;
}
static void dce_v6_0_afmt_fini(struct amdgpu_device *adev)
@@ -2397,7 +2407,9 @@ static int dce_v6_0_sw_init(void *handle)
return -EINVAL;
/* setup afmt */
- dce_v6_0_afmt_init(adev);
+ r = dce_v6_0_afmt_init(adev);
+ if (r)
+ return r;
r = dce_v6_0_audio_init(adev);
if (r)
@@ -2782,7 +2794,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
uint32_t disp_int, mask, int_control, tmp;
unsigned hpd;
- if (entry->src_data > 6) {
+ if (entry->src_data >= adev->mode_info.num_hpd) {
DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index abd5213dfe18..a7decf977b5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -170,7 +170,7 @@ static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
*/
static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
{
- unsigned i = 0;
+ unsigned i = 100;
if (crtc >= adev->mode_info.num_crtc)
return;
@@ -182,14 +182,16 @@ static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
* wait for another frame.
*/
while (dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v8_0_is_counter_moving(adev, crtc))
break;
}
}
while (!dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ % 100 == 0) {
+ if (i++ == 100) {
+ i = 0;
if (!dce_v8_0_is_counter_moving(adev, crtc))
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 619b604ab8ae..30badd261269 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -95,7 +95,7 @@ static bool dce_virtual_is_display_hung(struct amdgpu_device *adev)
return false;
}
-void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
+static void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
switch (adev->asic_type) {
@@ -127,13 +127,13 @@ void dce_virtual_stop_mc_access(struct amdgpu_device *adev,
return;
}
-void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
+static void dce_virtual_resume_mc_access(struct amdgpu_device *adev,
struct amdgpu_mode_mc_save *save)
{
return;
}
-void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
+static void dce_virtual_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
deleted file mode 100644
index ed03b75175d4..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_smum.h"
-
-MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int fiji_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- fiji_dpm_set_funcs(adev);
-
- return 0;
-}
-
-static int fiji_dpm_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30] = "amdgpu/fiji_smc.bin";
- int err;
-
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int fiji_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = fiji_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int fiji_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int fiji_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- ret = fiji_smu_init(adev);
- if (ret) {
- DRM_ERROR("SMU initialization failed\n");
- goto fail;
- }
-
- ret = fiji_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
- mutex_unlock(&adev->pm.mutex);
- return 0;
-
-fail:
- adev->firmware.smu_load = false;
- mutex_unlock(&adev->pm.mutex);
- return -EINVAL;
-}
-
-static int fiji_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- mutex_lock(&adev->pm.mutex);
- fiji_smu_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- return 0;
-}
-
-static int fiji_dpm_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- fiji_dpm_hw_fini(adev);
-
- return 0;
-}
-
-static int fiji_dpm_resume(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- fiji_dpm_hw_init(adev);
-
- return 0;
-}
-
-static int fiji_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int fiji_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-const struct amd_ip_funcs fiji_dpm_ip_funcs = {
- .name = "fiji_dpm",
- .early_init = fiji_dpm_early_init,
- .late_init = NULL,
- .sw_init = fiji_dpm_sw_init,
- .sw_fini = fiji_dpm_sw_fini,
- .hw_init = fiji_dpm_hw_init,
- .hw_fini = fiji_dpm_hw_fini,
- .suspend = fiji_dpm_suspend,
- .resume = fiji_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = fiji_dpm_set_clockgating_state,
- .set_powergating_state = fiji_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs fiji_dpm_funcs = {
- .get_temperature = NULL,
- .pre_set_power_state = NULL,
- .set_power_state = NULL,
- .post_set_power_state = NULL,
- .display_configuration_changed = NULL,
- .get_sclk = NULL,
- .get_mclk = NULL,
- .print_power_state = NULL,
- .debugfs_print_current_performance_level = NULL,
- .force_performance_level = NULL,
- .vblank_too_short = NULL,
- .powergate_uvd = NULL,
-};
-
-static void fiji_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->pm.funcs)
- adev->pm.funcs = &fiji_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
deleted file mode 100644
index b3e19ba4c57f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ /dev/null
@@ -1,863 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "fiji_ppsmc.h"
-#include "fiji_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-
-#define FIJI_SMC_SIZE 0x20000
-
-static int fiji_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
- uint32_t val;
-
- if (smc_address & 3)
- return -EINVAL;
-
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- return 0;
-}
-
-static int fiji_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
- unsigned long flags;
-
- if (smc_start_address & 3)
- return -EINVAL;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* Bytes are written into the SMC addres space with the MSB first */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = fiji_set_smc_sram_address(adev, addr, limit);
-
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
- /* Now write odd bytes left, do a read modify write cycle */
- data = 0;
-
- result = fiji_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- orig_data = RREG32(mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = fiji_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-out:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int fiji_program_jump_on_start(struct amdgpu_device *adev)
-{
- static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
- fiji_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-static bool fiji_is_smc_ram_running(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
- return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32(mmSMC_RESP_0);
- if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, 0x20000);
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- if (!fiji_is_smc_ram_running(adev))
- {
- return -EINVAL;
- }
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-static int fiji_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg,
- uint32_t parameter)
-{
- if (!fiji_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return fiji_send_msg_to_smc(adev, msg);
-}
-
-static int fiji_send_msg_to_smc_with_parameter_without_waiting(
- struct amdgpu_device *adev,
- PPSMC_Msg msg, uint32_t parameter)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return fiji_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int fiji_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- if (!fiji_is_smc_ram_running(adev))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
-static int fiji_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- uint32_t ucode_size;
- uint32_t ucode_start_address;
- const uint8_t *src;
- uint32_t val;
- uint32_t byte_count;
- uint32_t *data;
- unsigned long flags;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- /* Skip SMC ucode loading on SR-IOV capable boards.
- * vbios does this for us in asic_init in that case.
- */
- if (adev->virtualization.supports_sr_iov)
- return 0;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- src = (const uint8_t *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3) {
- DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (ucode_size > FIJI_SMC_SIZE) {
- DRM_ERROR("SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- byte_count = ucode_size;
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- WREG32(mmSMC_IND_DATA_0, data[0]);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-#if 0 /* not used yet */
-static int fiji_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t *value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = fiji_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int fiji_write_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = fiji_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int fiji_smu_stop_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID fiji_convert_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- return AMDGPU_UCODE_ID_SDMA0;
- case UCODE_ID_SDMA1:
- return AMDGPU_UCODE_ID_SDMA1;
- case UCODE_ID_CP_CE:
- return AMDGPU_UCODE_ID_CP_CE;
- case UCODE_ID_CP_PFP:
- return AMDGPU_UCODE_ID_CP_PFP;
- case UCODE_ID_CP_ME:
- return AMDGPU_UCODE_ID_CP_ME;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- case UCODE_ID_CP_MEC_JT2:
- return AMDGPU_UCODE_ID_CP_MEC1;
- case UCODE_ID_RLC_G:
- return AMDGPU_UCODE_ID_RLC_G;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return AMDGPU_UCODE_ID_MAXIMUM;
- }
-}
-
-static int fiji_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- enum AMDGPU_UCODE_ID id = fiji_convert_fw_type(fw_type);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header = NULL;
- uint64_t gpu_addr;
- uint32_t data_size;
-
- if (ucode->fw == NULL)
- return -EINVAL;
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
- (fw_type == UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
- data_size = le32_to_cpu(header->jt_size) << 2;
- }
-
- entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = upper_32_bits(gpu_addr);
- entry->image_addr_low = lower_32_bits(gpu_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = data_size;
- entry->num_register_entries = 0;
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
-
- return 0;
-}
-
-static int fiji_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct fiji_smu_private_data *private = (struct fiji_smu_private_data *)adev->smu.priv;
- struct SMU_DRAMData_TOC *toc;
- uint32_t fw_to_load;
-
- WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)private->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- if (!adev->firmware.smu_load)
- return 0;
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for RLC\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for CE\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for PFP\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for ME\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA0\n");
- return -EINVAL;
- }
-
- if (fiji_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA1\n");
- return -EINVAL;
- }
-
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
- fiji_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_MASK;
-
- if (fiji_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
- DRM_ERROR("Fail to request SMU load ucode\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static uint32_t fiji_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case AMDGPU_UCODE_ID_SDMA0:
- return UCODE_ID_SDMA0_MASK;
- case AMDGPU_UCODE_ID_SDMA1:
- return UCODE_ID_SDMA1_MASK;
- case AMDGPU_UCODE_ID_CP_CE:
- return UCODE_ID_CP_CE_MASK;
- case AMDGPU_UCODE_ID_CP_PFP:
- return UCODE_ID_CP_PFP_MASK;
- case AMDGPU_UCODE_ID_CP_ME:
- return UCODE_ID_CP_ME_MASK;
- case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_CP_MEC2:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_RLC_G:
- return UCODE_ID_RLC_G_MASK;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return 0;
- }
-}
-
-static int fiji_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_type)
-{
- uint32_t fw_mask = fiji_smu_get_mask_for_fw_type(fw_type);
- int i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int fiji_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
- int i;
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = fiji_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Clear status */
- WREG32_SMC(ixSMU_STATUS, 0);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Set SMU Auto Start */
- val = RREG32_SMC(ixSMU_INPUT_DATA);
- val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
- WREG32_SMC(ixSMU_INPUT_DATA, val);
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Interrupt is not enabled by firmware\n");
- return -EINVAL;
- }
-
- /* Call Test SMU message with 0x20000 offset
- * to trigger SMU start
- */
- fiji_send_msg_to_smc_offset(adev);
- DRM_INFO("[FM]try triger smu start\n");
- /* Wait for done bit to be set */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMU_STATUS);
- if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMU start\n");
- return -EINVAL;
- }
-
- /* Check pass/failed indicator */
- val = RREG32_SMC(ixSMU_STATUS);
- if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
- DRM_ERROR("SMU Firmware start failed\n");
- return -EINVAL;
- }
- DRM_INFO("[FM]smu started\n");
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMU firmware initialization failed\n");
- return -EINVAL;
- }
- DRM_INFO("[FM]smu initialized\n");
-
- return 0;
-}
-
-static int fiji_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
- int i, result;
- uint32_t val;
-
- /* wait for smc boot up */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
- if (val)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMC boot sequence is not completed\n");
- return -EINVAL;
- }
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = fiji_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Set SMC instruction start point at 0x0 */
- fiji_program_jump_on_start(adev);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMC firmware initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int fiji_smu_start(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
-
- if (!fiji_is_smc_ram_running(adev)) {
- val = RREG32_SMC(ixSMU_FIRMWARE);
- if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
- DRM_INFO("[FM]start smu in nonprotection mode\n");
- result = fiji_smu_start_in_non_protection_mode(adev);
- if (result)
- return result;
- } else {
- DRM_INFO("[FM]start smu in protection mode\n");
- result = fiji_smu_start_in_protection_mode(adev);
- if (result)
- return result;
- }
- }
-
- return fiji_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs fiji_smumgr_funcs = {
- .check_fw_load_finish = fiji_smu_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-int fiji_smu_init(struct amdgpu_device *adev)
-{
- struct fiji_smu_private_data *private;
- uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- uint32_t smu_internal_buffer_size = 200*4096;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
- uint64_t mc_addr;
- void *toc_buf_ptr;
- void *smu_buf_ptr;
- int ret;
-
- private = kzalloc(sizeof(struct fiji_smu_private_data), GFP_KERNEL);
- if (NULL == private)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = private;
- adev->smu.fw_flags = 0;
-
- /* Allocate FW image data structure and header buffer */
- ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, toc_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for TOC buffer\n");
- return -ENOMEM;
- }
-
- /* Allocate buffer for SMU internal buffer */
- ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, smu_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
- return -ENOMEM;
- }
-
- /* Retrieve GPU address for header buffer and internal buffer */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the TOC buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- private->header_addr_low = lower_32_bits(mc_addr);
- private->header_addr_high = upper_32_bits(mc_addr);
- private->header = toc_buf_ptr;
-
- ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the SMU internal buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- private->smu_buffer_addr_low = lower_32_bits(mc_addr);
- private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
- adev->smu.smumgr_funcs = &fiji_smumgr_funcs;
-
- return 0;
-}
-
-int fiji_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.fw_buf)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 410b29c05671..40abb6b81c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -931,6 +931,123 @@ static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev,
return data & mask;
}
+static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
+{
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ *rconf |= RB_XSEL2(2) | RB_XSEL | PKR_MAP(2) | PKR_YSEL(1) |
+ SE_MAP(2) | SE_XSEL(2) | SE_YSEL(2);
+ break;
+ case CHIP_VERDE:
+ *rconf |= RB_XSEL | PKR_MAP(2) | PKR_YSEL(1);
+ break;
+ case CHIP_OLAND:
+ *rconf |= RB_YSEL;
+ break;
+ case CHIP_HAINAN:
+ *rconf |= 0x0;
+ break;
+ default:
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ break;
+ }
+}
+
+static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+ u32 raster_config, unsigned rb_mask,
+ unsigned num_rb)
+{
+ unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+ unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+ unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+ unsigned rb_per_se = num_rb / num_se;
+ unsigned se_mask[4];
+ unsigned se;
+
+ se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+ se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+ se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+ se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+ WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+ WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+ WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+ for (se = 0; se < num_se; se++) {
+ unsigned raster_config_se = raster_config;
+ unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+ unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+ int idx = (se / 2) * 2;
+
+ if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+ raster_config_se &= ~SE_MAP_MASK;
+
+ if (!se_mask[idx]) {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ } else {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ }
+ }
+
+ pkr0_mask &= rb_mask;
+ pkr1_mask &= rb_mask;
+ if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+ raster_config_se &= ~PKR_MAP_MASK;
+
+ if (!pkr0_mask) {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ } else {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ }
+ }
+
+ if (rb_per_se >= 2) {
+ unsigned rb0_mask = 1 << (se * rb_per_se);
+ unsigned rb1_mask = rb0_mask << 1;
+
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+
+ if (rb_per_se > 2) {
+ rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+ rb1_mask = rb0_mask << 1;
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+ }
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on SI */
+ gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ WREG32(PA_SC_RASTER_CONFIG, raster_config_se);
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on SI */
+ gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
u32 se_num, u32 sh_per_se,
u32 max_rb_num_per_se)
@@ -939,6 +1056,7 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
u32 data, mask;
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
+ unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
@@ -961,6 +1079,9 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
adev->gfx.config.backend_enable_mask = enabled_rbs;
adev->gfx.config.num_rbs = hweight32(enabled_rbs);
+ num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines, 16);
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < se_num; i++) {
gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
@@ -980,7 +1101,15 @@ static void gfx_v6_0_setup_rb(struct amdgpu_device *adev,
}
enabled_rbs >>= 2;
}
- WREG32(PA_SC_RASTER_CONFIG, data);
+ gfx_v6_0_raster_config(adev, &data);
+
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes)
+ WREG32(PA_SC_RASTER_CONFIG, data);
+ else
+ gfx_v6_0_write_harvested_raster_configs(adev, data,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
}
gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 90102f123bb8..32a676291e67 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1645,6 +1645,147 @@ static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask;
}
+static void
+gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+ SE_XSEL(1) | SE_YSEL(1);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_HAWAII:
+ *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+ RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
+ PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
+ SE_YSEL(3);
+ *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+ SE_PAIR_YSEL(2);
+ break;
+ case CHIP_KAVERI:
+ *rconf |= RB_MAP_PKR0(2);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ *rconf |= 0x0;
+ *rconf1 |= 0x0;
+ break;
+ default:
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ break;
+ }
+}
+
+static void
+gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+ u32 raster_config, u32 raster_config_1,
+ unsigned rb_mask, unsigned num_rb)
+{
+ unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+ unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+ unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+ unsigned rb_per_se = num_rb / num_se;
+ unsigned se_mask[4];
+ unsigned se;
+
+ se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+ se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+ se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+ se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+ WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+ WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+ WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+ if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+ (!se_mask[2] && !se_mask[3]))) {
+ raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+ if (!se_mask[0] && !se_mask[1]) {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+ } else {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+ }
+ }
+
+ for (se = 0; se < num_se; se++) {
+ unsigned raster_config_se = raster_config;
+ unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+ unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+ int idx = (se / 2) * 2;
+
+ if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+ raster_config_se &= ~SE_MAP_MASK;
+
+ if (!se_mask[idx]) {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ } else {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ }
+ }
+
+ pkr0_mask &= rb_mask;
+ pkr1_mask &= rb_mask;
+ if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+ raster_config_se &= ~PKR_MAP_MASK;
+
+ if (!pkr0_mask) {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ } else {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ }
+ }
+
+ if (rb_per_se >= 2) {
+ unsigned rb0_mask = 1 << (se * rb_per_se);
+ unsigned rb1_mask = rb0_mask << 1;
+
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+
+ if (rb_per_se > 2) {
+ rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+ rb1_mask = rb0_mask << 1;
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+ }
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on CI+ */
+ gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on CI+ */
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
/**
* gfx_v7_0_setup_rb - setup the RBs on the asic
*
@@ -1658,9 +1799,11 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
+ u32 raster_config = 0, raster_config_1 = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -1672,10 +1815,25 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
}
}
gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+ num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines, 16);
+
+ gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);
+
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes) {
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ } else {
+ gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
+ }
+ mutex_unlock(&adev->grbm_idx_mutex);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 47e270ad4fe3..6c6ff57b1c95 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3492,13 +3492,163 @@ static u32 gfx_v8_0_get_rb_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask;
}
+static void
+gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
+{
+ switch (adev->asic_type) {
+ case CHIP_FIJI:
+ *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
+ RB_XSEL2(1) | PKR_MAP(2) |
+ PKR_XSEL(1) | PKR_YSEL(1) |
+ SE_MAP(2) | SE_XSEL(2) | SE_YSEL(3);
+ *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
+ SE_PAIR_YSEL(2);
+ break;
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+ SE_XSEL(1) | SE_YSEL(1);
+ *rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(2) |
+ SE_PAIR_YSEL(2);
+ break;
+ case CHIP_TOPAZ:
+ case CHIP_CARRIZO:
+ *rconf |= RB_MAP_PKR0(2);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_POLARIS11:
+ *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
+ SE_XSEL(1) | SE_YSEL(1);
+ *rconf1 |= 0x0;
+ break;
+ case CHIP_STONEY:
+ *rconf |= 0x0;
+ *rconf1 |= 0x0;
+ break;
+ default:
+ DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
+ break;
+ }
+}
+
+static void
+gfx_v8_0_write_harvested_raster_configs(struct amdgpu_device *adev,
+ u32 raster_config, u32 raster_config_1,
+ unsigned rb_mask, unsigned num_rb)
+{
+ unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
+ unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
+ unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
+ unsigned rb_per_se = num_rb / num_se;
+ unsigned se_mask[4];
+ unsigned se;
+
+ se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
+ se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
+ se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
+ se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;
+
+ WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
+ WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
+ WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));
+
+ if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
+ (!se_mask[2] && !se_mask[3]))) {
+ raster_config_1 &= ~SE_PAIR_MAP_MASK;
+
+ if (!se_mask[0] && !se_mask[1]) {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
+ } else {
+ raster_config_1 |=
+ SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
+ }
+ }
+
+ for (se = 0; se < num_se; se++) {
+ unsigned raster_config_se = raster_config;
+ unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
+ unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
+ int idx = (se / 2) * 2;
+
+ if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
+ raster_config_se &= ~SE_MAP_MASK;
+
+ if (!se_mask[idx]) {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
+ } else {
+ raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
+ }
+ }
+
+ pkr0_mask &= rb_mask;
+ pkr1_mask &= rb_mask;
+ if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
+ raster_config_se &= ~PKR_MAP_MASK;
+
+ if (!pkr0_mask) {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
+ } else {
+ raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
+ }
+ }
+
+ if (rb_per_se >= 2) {
+ unsigned rb0_mask = 1 << (se * rb_per_se);
+ unsigned rb1_mask = rb0_mask << 1;
+
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR0_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+
+ if (rb_per_se > 2) {
+ rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
+ rb1_mask = rb0_mask << 1;
+ rb0_mask &= rb_mask;
+ rb1_mask &= rb_mask;
+ if (!rb0_mask || !rb1_mask) {
+ raster_config_se &= ~RB_MAP_PKR1_MASK;
+
+ if (!rb0_mask) {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
+ } else {
+ raster_config_se |=
+ RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
+ }
+ }
+ }
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on VI */
+ gfx_v8_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ }
+
+ /* GRBM_GFX_INDEX has a different offset on VI */
+ gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+}
+
static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
{
int i, j;
u32 data;
+ u32 raster_config = 0, raster_config_1 = 0;
u32 active_rbs = 0;
u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
adev->gfx.config.max_sh_per_se;
+ unsigned num_rb_pipes;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
@@ -3510,10 +3660,26 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
}
}
gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
adev->gfx.config.backend_enable_mask = active_rbs;
adev->gfx.config.num_rbs = hweight32(active_rbs);
+
+ num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines, 16);
+
+ gfx_v8_0_raster_config(adev, &raster_config, &raster_config_1);
+
+ if (!adev->gfx.config.backend_enable_mask ||
+ adev->gfx.config.num_rbs >= num_rb_pipes) {
+ WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
+ WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
+ } else {
+ gfx_v8_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
+ adev->gfx.config.backend_enable_mask,
+ num_rb_pipes);
+ }
+
+ mutex_unlock(&adev->grbm_idx_mutex);
}
/**
@@ -5817,6 +5983,76 @@ static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ uint32_t msg_id, pp_state;
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ return 0;
+}
+
+static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ uint32_t msg_id, pp_state;
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_3D,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_MG,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_RLC,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
+ PP_BLOCK_GFX_CP,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ return 0;
+}
+
static int gfx_v8_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -5829,6 +6065,13 @@ static int gfx_v8_0_set_clockgating_state(void *handle,
gfx_v8_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
+ case CHIP_TONGA:
+ gfx_v8_0_tonga_update_gfx_clock_gating(adev, state);
+ break;
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ gfx_v8_0_polaris_update_gfx_clock_gating(adev, state);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 84c10d5117a9..1b319f5bc696 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -269,8 +269,10 @@ static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
/* Skip MC ucode loading on SR-IOV capable boards.
* vbios does this for us in asic_init in that case.
+ * Skip MC ucode loading on VF, because the hypervisor will do
+ * that for this adaptor.
*/
- if (adev->virtualization.supports_sr_iov)
+ if (amdgpu_sriov_bios(adev))
return 0;
hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
deleted file mode 100644
index 2f078ad6095c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "iceland_smum.h"
-
-MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int iceland_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- iceland_dpm_set_funcs(adev);
-
- return 0;
-}
-
-static int iceland_dpm_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30] = "amdgpu/topaz_smc.bin";
- int err;
-
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int iceland_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = iceland_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int iceland_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int iceland_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- /* smu init only needs to be called at startup, not resume.
- * It should be in sw_init, but requires the fw info gathered
- * in sw_init from other IP modules.
- */
- ret = iceland_smu_init(adev);
- if (ret) {
- DRM_ERROR("SMU initialization failed\n");
- goto fail;
- }
-
- ret = iceland_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
- mutex_unlock(&adev->pm.mutex);
- return 0;
-
-fail:
- adev->firmware.smu_load = false;
- mutex_unlock(&adev->pm.mutex);
- return -EINVAL;
-}
-
-static int iceland_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
- /* smu fini only needs to be called at teardown, not suspend.
- * It should be in sw_fini, but we put it here for symmetry
- * with smu init.
- */
- iceland_smu_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- return 0;
-}
-
-static int iceland_dpm_suspend(void *handle)
-{
- return 0;
-}
-
-static int iceland_dpm_resume(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- ret = iceland_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
-fail:
- mutex_unlock(&adev->pm.mutex);
- return ret;
-}
-
-static int iceland_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int iceland_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-const struct amd_ip_funcs iceland_dpm_ip_funcs = {
- .name = "iceland_dpm",
- .early_init = iceland_dpm_early_init,
- .late_init = NULL,
- .sw_init = iceland_dpm_sw_init,
- .sw_fini = iceland_dpm_sw_fini,
- .hw_init = iceland_dpm_hw_init,
- .hw_fini = iceland_dpm_hw_fini,
- .suspend = iceland_dpm_suspend,
- .resume = iceland_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = iceland_dpm_set_clockgating_state,
- .set_powergating_state = iceland_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs iceland_dpm_funcs = {
- .get_temperature = NULL,
- .pre_set_power_state = NULL,
- .set_power_state = NULL,
- .post_set_power_state = NULL,
- .display_configuration_changed = NULL,
- .get_sclk = NULL,
- .get_mclk = NULL,
- .print_power_state = NULL,
- .debugfs_print_current_performance_level = NULL,
- .force_performance_level = NULL,
- .vblank_too_short = NULL,
- .powergate_uvd = NULL,
-};
-
-static void iceland_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->pm.funcs)
- adev->pm.funcs = &iceland_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
deleted file mode 100644
index ef7c27d7356a..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ /dev/null
@@ -1,677 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "ppsmc.h"
-#include "iceland_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
-
-#define ICELAND_SMC_SIZE 0x20000
-
-static int iceland_set_smc_sram_address(struct amdgpu_device *adev,
- uint32_t smc_address, uint32_t limit)
-{
- uint32_t val;
-
- if (smc_address & 3)
- return -EINVAL;
-
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- return 0;
-}
-
-static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev,
- uint32_t smc_start_address,
- const uint8_t *src,
- uint32_t byte_count, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
- unsigned long flags;
-
- if (smc_start_address & 3)
- return -EINVAL;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* Bytes are written into the SMC address space with the MSB first */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = iceland_set_smc_sram_address(adev, addr, limit);
-
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
- /* Now write odd bytes left, do a read modify write cycle */
- data = 0;
-
- result = iceland_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- orig_data = RREG32(mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = iceland_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-out:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static void iceland_start_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-static void iceland_reset_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-}
-
-static int iceland_program_jump_on_start(struct amdgpu_device *adev)
-{
- static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
- iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-static void iceland_stop_smc_clock(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-static void iceland_start_smc_clock(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-}
-
-static bool iceland_is_smc_ram_running(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
- return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32(mmSMC_RESP_0);
- if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-
-static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg,
- uint32_t parameter)
-{
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return iceland_send_msg_to_smc(adev, msg);
-}
-
-static int iceland_send_msg_to_smc_with_parameter_without_waiting(
- struct amdgpu_device *adev,
- PPSMC_Msg msg, uint32_t parameter)
-{
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return iceland_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- if (!iceland_is_smc_ram_running(adev))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
-static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- uint32_t ucode_size;
- uint32_t ucode_start_address;
- const uint8_t *src;
- uint32_t val;
- uint32_t byte_count;
- uint32_t data;
- unsigned long flags;
- int i;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- /* Skip SMC ucode loading on SR-IOV capable boards.
- * vbios does this for us in asic_init in that case.
- */
- if (adev->virtualization.supports_sr_iov)
- return 0;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- src = (const uint8_t *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3) {
- DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (ucode_size > ICELAND_SMC_SIZE) {
- DRM_ERROR("SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0)
- break;
- udelay(1);
- }
- val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL);
- WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1);
-
- iceland_stop_smc_clock(adev);
- iceland_reset_smc(adev);
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- byte_count = ucode_size;
- while (byte_count >= 4) {
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
- WREG32(mmSMC_IND_DATA_0, data);
- src += 4;
- byte_count -= 4;
- }
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-#if 0 /* not used yet */
-static int iceland_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t *value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = iceland_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int iceland_write_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = iceland_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int iceland_smu_stop_smc(struct amdgpu_device *adev)
-{
- iceland_reset_smc(adev);
- iceland_stop_smc_clock(adev);
-
- return 0;
-}
-#endif
-
-static int iceland_smu_start_smc(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- iceland_program_jump_on_start(adev);
- iceland_start_smc_clock(adev);
- iceland_start_smc(adev);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1)
- break;
- udelay(1);
- }
- return 0;
-}
-
-static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- return AMDGPU_UCODE_ID_SDMA0;
- case UCODE_ID_SDMA1:
- return AMDGPU_UCODE_ID_SDMA1;
- case UCODE_ID_CP_CE:
- return AMDGPU_UCODE_ID_CP_CE;
- case UCODE_ID_CP_PFP:
- return AMDGPU_UCODE_ID_CP_PFP;
- case UCODE_ID_CP_ME:
- return AMDGPU_UCODE_ID_CP_ME;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- return AMDGPU_UCODE_ID_CP_MEC1;
- case UCODE_ID_CP_MEC_JT2:
- return AMDGPU_UCODE_ID_CP_MEC2;
- case UCODE_ID_RLC_G:
- return AMDGPU_UCODE_ID_RLC_G;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return AMDGPU_UCODE_ID_MAXIMUM;
- }
-}
-
-static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case AMDGPU_UCODE_ID_SDMA0:
- return UCODE_ID_SDMA0_MASK;
- case AMDGPU_UCODE_ID_SDMA1:
- return UCODE_ID_SDMA1_MASK;
- case AMDGPU_UCODE_ID_CP_CE:
- return UCODE_ID_CP_CE_MASK;
- case AMDGPU_UCODE_ID_CP_PFP:
- return UCODE_ID_CP_PFP_MASK;
- case AMDGPU_UCODE_ID_CP_ME:
- return UCODE_ID_CP_ME_MASK;
- case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
- case AMDGPU_UCODE_ID_CP_MEC2:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_RLC_G:
- return UCODE_ID_RLC_G_MASK;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return 0;
- }
-}
-
-static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header = NULL;
- uint64_t gpu_addr;
- uint32_t data_size;
-
- if (ucode->fw == NULL)
- return -EINVAL;
-
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = upper_32_bits(gpu_addr);
- entry->image_addr_low = lower_32_bits(gpu_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = data_size;
- entry->num_register_entries = 0;
- entry->flags = 0;
-
- return 0;
-}
-
-static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv;
- struct SMU_DRAMData_TOC *toc;
- uint32_t fw_to_load;
-
- toc = (struct SMU_DRAMData_TOC *)private->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- if (!adev->firmware.smu_load)
- return 0;
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for RLC\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for CE\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for PFP\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for ME\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA0\n");
- return -EINVAL;
- }
-
- if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA1\n");
- return -EINVAL;
- }
-
- iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
- iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_MASK |
- UCODE_ID_CP_MEC_JT1_MASK;
-
-
- if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
- DRM_ERROR("Fail to request SMU load ucode\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_type)
-{
- uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type);
- int i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int iceland_smu_start(struct amdgpu_device *adev)
-{
- int result;
-
- result = iceland_smu_upload_firmware_image(adev);
- if (result)
- return result;
- result = iceland_smu_start_smc(adev);
- if (result)
- return result;
-
- return iceland_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = {
- .check_fw_load_finish = iceland_smu_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-int iceland_smu_init(struct amdgpu_device *adev)
-{
- struct iceland_smu_private_data *private;
- uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- uint64_t mc_addr;
- void *toc_buf_ptr;
- int ret;
-
- private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL);
- if (NULL == private)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = private;
- adev->smu.fw_flags = 0;
-
- /* Allocate FW image data structure and header buffer */
- ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, toc_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for TOC buffer\n");
- return -ENOMEM;
- }
-
- /* Retrieve GPU address for header buffer and internal buffer */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the TOC buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- private->header_addr_low = lower_32_bits(mc_addr);
- private->header_addr_high = upper_32_bits(mc_addr);
- private->header = toc_buf_ptr;
-
- adev->smu.smumgr_funcs = &iceland_smumgr_funcs;
-
- return 0;
-}
-
-int iceland_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.fw_buf)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index fee76b8a536f..dc9511c5ecb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -952,12 +952,6 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}
-static u32 si_get_virtual_caps(struct amdgpu_device *adev)
-{
- /* SI does not support SR-IOV */
- return 0;
-}
-
static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = {
{GRBM_STATUS, false},
{GB_ADDR_CONFIG, false},
@@ -1124,16 +1118,22 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
return 0;
}
+static void si_detect_hw_virtualization(struct amdgpu_device *adev)
+{
+ if (is_virtual_machine()) /* passthrough mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+}
+
static const struct amdgpu_asic_funcs si_asic_funcs =
{
.read_disabled_bios = &si_read_disabled_bios,
+ .detect_hw_virtualization = si_detect_hw_virtualization,
.read_register = &si_read_register,
.reset = &si_asic_reset,
.set_vga_state = &si_vga_set_state,
.get_xclk = &si_get_xclk,
.set_uvd_clocks = &si_set_uvd_clocks,
.set_vce_clocks = NULL,
- .get_virtual_caps = &si_get_virtual_caps,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
deleted file mode 100644
index f06f6f4dc3a8..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_smum.h"
-
-MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev);
-
-static int tonga_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- tonga_dpm_set_funcs(adev);
-
- return 0;
-}
-
-static int tonga_dpm_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30] = "amdgpu/tonga_smc.bin";
- int err;
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int tonga_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = tonga_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int tonga_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int tonga_dpm_hw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
-
- /* smu init only needs to be called at startup, not resume.
- * It should be in sw_init, but requires the fw info gathered
- * in sw_init from other IP modules.
- */
- ret = tonga_smu_init(adev);
- if (ret) {
- DRM_ERROR("SMU initialization failed\n");
- goto fail;
- }
-
- ret = tonga_smu_start(adev);
- if (ret) {
- DRM_ERROR("SMU start failed\n");
- goto fail;
- }
-
- mutex_unlock(&adev->pm.mutex);
- return 0;
-
-fail:
- adev->firmware.smu_load = false;
- mutex_unlock(&adev->pm.mutex);
- return -EINVAL;
-}
-
-static int tonga_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- mutex_lock(&adev->pm.mutex);
- /* smu fini only needs to be called at teardown, not suspend.
- * It should be in sw_fini, but we put it here for symmetry
- * with smu init.
- */
- tonga_smu_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- return 0;
-}
-
-static int tonga_dpm_suspend(void *handle)
-{
- return tonga_dpm_hw_fini(handle);
-}
-
-static int tonga_dpm_resume(void *handle)
-{
- return tonga_dpm_hw_init(handle);
-}
-
-static int tonga_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int tonga_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-const struct amd_ip_funcs tonga_dpm_ip_funcs = {
- .name = "tonga_dpm",
- .early_init = tonga_dpm_early_init,
- .late_init = NULL,
- .sw_init = tonga_dpm_sw_init,
- .sw_fini = tonga_dpm_sw_fini,
- .hw_init = tonga_dpm_hw_init,
- .hw_fini = tonga_dpm_hw_fini,
- .suspend = tonga_dpm_suspend,
- .resume = tonga_dpm_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
- .set_clockgating_state = tonga_dpm_set_clockgating_state,
- .set_powergating_state = tonga_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs tonga_dpm_funcs = {
- .get_temperature = NULL,
- .pre_set_power_state = NULL,
- .set_power_state = NULL,
- .post_set_power_state = NULL,
- .display_configuration_changed = NULL,
- .get_sclk = NULL,
- .get_mclk = NULL,
- .print_power_state = NULL,
- .debugfs_print_current_performance_level = NULL,
- .force_performance_level = NULL,
- .vblank_too_short = NULL,
- .powergate_uvd = NULL,
-};
-
-static void tonga_dpm_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->pm.funcs)
- adev->pm.funcs = &tonga_dpm_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
deleted file mode 100644
index 940de1836f8f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "tonga_ppsmc.h"
-#include "tonga_smum.h"
-#include "smu_ucode_xfer_vi.h"
-#include "amdgpu_ucode.h"
-
-#include "smu/smu_7_1_2_d.h"
-#include "smu/smu_7_1_2_sh_mask.h"
-
-#define TONGA_SMC_SIZE 0x20000
-
-static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit)
-{
- uint32_t val;
-
- if (smc_address & 3)
- return -EINVAL;
-
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- return 0;
-}
-
-static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit)
-{
- uint32_t addr;
- uint32_t data, orig_data;
- int result = 0;
- uint32_t extra_shift;
- unsigned long flags;
-
- if (smc_start_address & 3)
- return -EINVAL;
-
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
-		/* Bytes are written into the SMC address space with the MSB first */
- data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
-
- result = tonga_set_smc_sram_address(adev, addr, limit);
-
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- if (0 != byte_count) {
- /* Now write odd bytes left, do a read modify write cycle */
- data = 0;
-
- result = tonga_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- orig_data = RREG32(mmSMC_IND_DATA_0);
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
- data |= (orig_data & ~((~0UL) << extra_shift));
-
- result = tonga_set_smc_sram_address(adev, addr, limit);
- if (result)
- goto out;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-out:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int tonga_program_jump_on_start(struct amdgpu_device *adev)
-{
- static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40};
- tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-
- return 0;
-}
-
-static bool tonga_is_smc_ram_running(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable);
-
- return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C)));
-}
-
-static int wait_smu_response(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32(mmSMC_RESP_0);
- if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, 0x20000);
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- if (!tonga_is_smc_ram_running(adev))
- {
- return -EINVAL;
- }
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send message\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
- PPSMC_Msg msg)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
-static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg,
- uint32_t parameter)
-{
- if (!tonga_is_smc_ram_running(adev))
- return -EINVAL;
-
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc(adev, msg);
-}
-
-static int tonga_send_msg_to_smc_with_parameter_without_waiting(
- struct amdgpu_device *adev,
- PPSMC_Msg msg, uint32_t parameter)
-{
- if (wait_smu_response(adev)) {
- DRM_ERROR("Failed to send previous message\n");
- return -EINVAL;
- }
-
- WREG32(mmSMC_MSG_ARG_0, parameter);
-
- return tonga_send_msg_to_smc_without_waiting(adev, msg);
-}
-
-#if 0 /* not used yet */
-static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- int i;
- uint32_t val;
-
- if (!tonga_is_smc_ram_running(adev))
- return -EINVAL;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout)
- return -EINVAL;
-
- return 0;
-}
-#endif
-
-static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- uint32_t ucode_size;
- uint32_t ucode_start_address;
- const uint8_t *src;
- uint32_t val;
- uint32_t byte_count;
- uint32_t *data;
- unsigned long flags;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- /* Skip SMC ucode loading on SR-IOV capable boards.
- * vbios does this for us in asic_init in that case.
- */
- if (adev->virtualization.supports_sr_iov)
- return 0;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- src = (const uint8_t *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3) {
- DRM_ERROR("SMC ucode is not 4 bytes aligned\n");
- return -EINVAL;
- }
-
- if (ucode_size > TONGA_SMC_SIZE) {
- DRM_ERROR("SMC address is beyond the SMC RAM area\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
-
- byte_count = ucode_size;
- data = (uint32_t *)src;
- for (; byte_count >= 4; data++, byte_count -= 4)
- WREG32(mmSMC_IND_DATA_0, data[0]);
-
- val = RREG32(mmSMC_IND_ACCESS_CNTL);
- val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
- WREG32(mmSMC_IND_ACCESS_CNTL, val);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-#if 0 /* not used yet */
-static int tonga_read_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t *value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = tonga_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int tonga_write_smc_sram_dword(struct amdgpu_device *adev,
- uint32_t smc_address,
- uint32_t value,
- uint32_t limit)
-{
- int result;
- unsigned long flags;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- result = tonga_set_smc_sram_address(adev, smc_address, limit);
- if (result == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
- return result;
-}
-
-static int tonga_smu_stop_smc(struct amdgpu_device *adev)
-{
- uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- return 0;
-}
-#endif
-
-static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case UCODE_ID_SDMA0:
- return AMDGPU_UCODE_ID_SDMA0;
- case UCODE_ID_SDMA1:
- return AMDGPU_UCODE_ID_SDMA1;
- case UCODE_ID_CP_CE:
- return AMDGPU_UCODE_ID_CP_CE;
- case UCODE_ID_CP_PFP:
- return AMDGPU_UCODE_ID_CP_PFP;
- case UCODE_ID_CP_ME:
- return AMDGPU_UCODE_ID_CP_ME;
- case UCODE_ID_CP_MEC:
- case UCODE_ID_CP_MEC_JT1:
- return AMDGPU_UCODE_ID_CP_MEC1;
- case UCODE_ID_CP_MEC_JT2:
- return AMDGPU_UCODE_ID_CP_MEC2;
- case UCODE_ID_RLC_G:
- return AMDGPU_UCODE_ID_RLC_G;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return AMDGPU_UCODE_ID_MAXIMUM;
- }
-}
-
-static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
- uint32_t fw_type,
- struct SMU_Entry *entry)
-{
- enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type);
- struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
- const struct gfx_firmware_header_v1_0 *header = NULL;
- uint64_t gpu_addr;
- uint32_t data_size;
-
- if (ucode->fw == NULL)
- return -EINVAL;
-
- gpu_addr = ucode->mc_addr;
- header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- data_size = le32_to_cpu(header->header.ucode_size_bytes);
-
- if ((fw_type == UCODE_ID_CP_MEC_JT1) ||
- (fw_type == UCODE_ID_CP_MEC_JT2)) {
- gpu_addr += le32_to_cpu(header->jt_offset) << 2;
- data_size = le32_to_cpu(header->jt_size) << 2;
- }
-
- entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- entry->id = (uint16_t)fw_type;
- entry->image_addr_high = upper_32_bits(gpu_addr);
- entry->image_addr_low = lower_32_bits(gpu_addr);
- entry->meta_data_addr_high = 0;
- entry->meta_data_addr_low = 0;
- entry->data_size_byte = data_size;
- entry->num_register_entries = 0;
-
- if (fw_type == UCODE_ID_RLC_G)
- entry->flags = 1;
- else
- entry->flags = 0;
-
- return 0;
-}
-
-static int tonga_smu_request_load_fw(struct amdgpu_device *adev)
-{
- struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv;
- struct SMU_DRAMData_TOC *toc;
- uint32_t fw_to_load;
-
- WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0);
-
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high);
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low);
-
- toc = (struct SMU_DRAMData_TOC *)private->header;
- toc->num_entries = 0;
- toc->structure_version = 1;
-
- if (!adev->firmware.smu_load)
- return 0;
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for RLC\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for CE\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for PFP\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for ME\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT1\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA0\n");
- return -EINVAL;
- }
-
- if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1,
- &toc->entry[toc->num_entries++])) {
- DRM_ERROR("Failed to get firmware entry for SDMA1\n");
- return -EINVAL;
- }
-
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high);
- tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low);
-
- fw_to_load = UCODE_ID_RLC_G_MASK |
- UCODE_ID_SDMA0_MASK |
- UCODE_ID_SDMA1_MASK |
- UCODE_ID_CP_CE_MASK |
- UCODE_ID_CP_ME_MASK |
- UCODE_ID_CP_PFP_MASK |
- UCODE_ID_CP_MEC_MASK;
-
- if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
- DRM_ERROR("Fail to request SMU load ucode\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type)
-{
- switch (fw_type) {
- case AMDGPU_UCODE_ID_SDMA0:
- return UCODE_ID_SDMA0_MASK;
- case AMDGPU_UCODE_ID_SDMA1:
- return UCODE_ID_SDMA1_MASK;
- case AMDGPU_UCODE_ID_CP_CE:
- return UCODE_ID_CP_CE_MASK;
- case AMDGPU_UCODE_ID_CP_PFP:
- return UCODE_ID_CP_PFP_MASK;
- case AMDGPU_UCODE_ID_CP_ME:
- return UCODE_ID_CP_ME_MASK;
- case AMDGPU_UCODE_ID_CP_MEC1:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_CP_MEC2:
- return UCODE_ID_CP_MEC_MASK;
- case AMDGPU_UCODE_ID_RLC_G:
- return UCODE_ID_RLC_G_MASK;
- default:
- DRM_ERROR("ucode type is out of range!\n");
- return 0;
- }
-}
-
-static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev,
- uint32_t fw_type)
-{
- uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type);
- int i;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("check firmware loading failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
- int i;
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = tonga_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Clear status */
- WREG32_SMC(ixSMU_STATUS, 0);
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Set SMU Auto Start */
- val = RREG32_SMC(ixSMU_INPUT_DATA);
- val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1);
- WREG32_SMC(ixSMU_INPUT_DATA, val);
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Interrupt is not enabled by firmware\n");
- return -EINVAL;
- }
-
- /* Call Test SMU message with 0x20000 offset
- * to trigger SMU start
- */
- tonga_send_msg_to_smc_offset(adev);
-
- /* Wait for done bit to be set */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixSMU_STATUS);
- if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMU start\n");
- return -EINVAL;
- }
-
- /* Check pass/failed indicator */
- val = RREG32_SMC(ixSMU_STATUS);
- if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) {
- DRM_ERROR("SMU Firmware start failed\n");
- return -EINVAL;
- }
-
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if(REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMU firmware initialization failed\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev)
-{
- int i, result;
- uint32_t val;
-
- /* wait for smc boot up */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixRCU_UC_EVENTS);
- val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done);
- if (val)
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("SMC boot sequence is not completed\n");
- return -EINVAL;
- }
-
- /* Clear firmware interrupt enable flag */
- WREG32_SMC(ixFIRMWARE_FLAGS, 0);
-
- /* Assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- result = tonga_smu_upload_firmware_image(adev);
- if (result)
- return result;
-
- /* Set smc instruct start point at 0x0 */
-	/* Set smc instruction start point at 0x0 */
-
- /* Enable clock */
- val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val);
-
- /* De-assert reset */
- val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
- val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val);
-
- /* Wait for firmware to initialize */
- for (i = 0; i < adev->usec_timeout; i++) {
- val = RREG32_SMC(ixFIRMWARE_FLAGS);
- if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED))
- break;
- udelay(1);
- }
-
- if (i == adev->usec_timeout) {
- DRM_ERROR("Timeout for SMC firmware initialization\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-int tonga_smu_start(struct amdgpu_device *adev)
-{
- int result;
- uint32_t val;
-
- if (!tonga_is_smc_ram_running(adev)) {
- val = RREG32_SMC(ixSMU_FIRMWARE);
- if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) {
- result = tonga_smu_start_in_non_protection_mode(adev);
- if (result)
- return result;
- } else {
- result = tonga_smu_start_in_protection_mode(adev);
- if (result)
- return result;
- }
- }
-
- return tonga_smu_request_load_fw(adev);
-}
-
-static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = {
- .check_fw_load_finish = tonga_smu_check_fw_load_finish,
- .request_smu_load_fw = NULL,
- .request_smu_specific_fw = NULL,
-};
-
-int tonga_smu_init(struct amdgpu_device *adev)
-{
- struct tonga_smu_private_data *private;
- uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
- uint32_t smu_internal_buffer_size = 200*4096;
- struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
- struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
- uint64_t mc_addr;
- void *toc_buf_ptr;
- void *smu_buf_ptr;
- int ret;
-
- private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL);
- if (NULL == private)
- return -ENOMEM;
-
- /* allocate firmware buffers */
- if (adev->firmware.smu_load)
- amdgpu_ucode_init_bo(adev);
-
- adev->smu.priv = private;
- adev->smu.fw_flags = 0;
-
- /* Allocate FW image data structure and header buffer */
- ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, toc_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for TOC buffer\n");
- return -ENOMEM;
- }
-
- /* Allocate buffer for SMU internal buffer */
- ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, NULL, smu_buf);
- if (ret) {
- DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
- return -ENOMEM;
- }
-
- /* Retrieve GPU address for header buffer and internal buffer */
- ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the TOC buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the TOC buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.toc_buf);
- private->header_addr_low = lower_32_bits(mc_addr);
- private->header_addr_high = upper_32_bits(mc_addr);
- private->header = toc_buf_ptr;
-
- ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
- if (ret) {
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to reserve the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to pin the SMU internal buffer\n");
- return -EINVAL;
- }
-
- ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
- if (ret) {
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- amdgpu_bo_unref(&adev->smu.toc_buf);
- DRM_ERROR("Failed to map the SMU internal buffer\n");
- return -EINVAL;
- }
-
- amdgpu_bo_unreserve(adev->smu.smu_buf);
- private->smu_buffer_addr_low = lower_32_bits(mc_addr);
- private->smu_buffer_addr_high = upper_32_bits(mc_addr);
-
- adev->smu.smumgr_funcs = &tonga_smumgr_funcs;
-
- return 0;
-}
-
-int tonga_smu_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_unref(&adev->smu.toc_buf);
- amdgpu_bo_unref(&adev->smu.smu_buf);
- kfree(adev->smu.priv);
- adev->smu.priv = NULL;
- if (adev->firmware.fw_buf)
- amdgpu_ucode_fini_bo(adev);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index b688e2f77419..c0d9aad7126f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -79,6 +79,9 @@
#endif
#include "dce_virtual.h"
+MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
@@ -445,18 +448,21 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
- u32 caps = 0;
- u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ uint32_t reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ /* bit0: 0 means pf and 1 means vf */
+ /* bit31: 0 means disable IOV and 1 means enable */
+ if (reg & 1)
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_IS_VF;
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
- caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+ if (reg & 0x80000000)
+ adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
- caps |= AMDGPU_VIRT_CAPS_IS_VF;
-
- return caps;
+ if (reg == 0) {
+		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
+ adev->virtualization.virtual_caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
}
static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
@@ -1521,13 +1527,13 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
{
.read_disabled_bios = &vi_read_disabled_bios,
.read_bios_from_rom = &vi_read_bios_from_rom,
+ .detect_hw_virtualization = vi_detect_hw_virtualization,
.read_register = &vi_read_register,
.reset = &vi_asic_reset,
.set_vga_state = &vi_vga_set_state,
.get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks,
- .get_virtual_caps = &vi_get_virtual_caps,
};
static int vi_common_early_init(void *handle)
@@ -1657,6 +1663,10 @@ static int vi_common_early_init(void *handle)
return -EINVAL;
}
+ /* in early init stage, vbios code won't work */
+ if (adev->asic_funcs->detect_hw_virtualization)
+ amdgpu_asic_detect_hw_virtualization(adev);
+
if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true;
@@ -1800,6 +1810,63 @@ static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
+static int vi_common_set_clockgating_state_by_smu(void *handle,
+ enum amd_clockgating_state state)
+{
+ uint32_t msg_id, pp_state;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ if (state == AMD_CG_STATE_UNGATE)
+ pp_state = 0;
+ else
+ pp_state = PP_STATE_CG | PP_STATE_LS;
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_MC,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_SDMA,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_HDP,
+ PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_BIF,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_DRM,
+ PP_STATE_SUPPORT_LS,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
+ PP_BLOCK_SYS_ROM,
+ PP_STATE_SUPPORT_CG,
+ pp_state);
+ amd_set_clockgating_by_smu(pp_handle, msg_id);
+
+ return 0;
+}
+
static int vi_common_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -1825,6 +1892,10 @@ static int vi_common_set_clockgating_state(void *handle,
vi_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
+ case CHIP_TONGA:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ vi_common_set_clockgating_state_by_smu(adev, state);
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index f62b261660d4..11746f22d0c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -373,4 +373,41 @@
#define VCE_CMD_WAIT_GE 0x00000106
#define VCE_CMD_UPDATE_PTB 0x00000107
#define VCE_CMD_FLUSH_TLB 0x00000108
+
+/* mmPA_SC_RASTER_CONFIG mask */
+#define RB_MAP_PKR0(x) ((x) << 0)
+#define RB_MAP_PKR0_MASK (0x3 << 0)
+#define RB_MAP_PKR1(x) ((x) << 2)
+#define RB_MAP_PKR1_MASK (0x3 << 2)
+#define RB_XSEL2(x) ((x) << 4)
+#define RB_XSEL2_MASK (0x3 << 4)
+#define RB_XSEL (1 << 6)
+#define RB_YSEL (1 << 7)
+#define PKR_MAP(x) ((x) << 8)
+#define PKR_MAP_MASK (0x3 << 8)
+#define PKR_XSEL(x) ((x) << 10)
+#define PKR_XSEL_MASK (0x3 << 10)
+#define PKR_YSEL(x) ((x) << 12)
+#define PKR_YSEL_MASK (0x3 << 12)
+#define SC_MAP(x) ((x) << 16)
+#define SC_MAP_MASK (0x3 << 16)
+#define SC_XSEL(x) ((x) << 18)
+#define SC_XSEL_MASK (0x3 << 18)
+#define SC_YSEL(x) ((x) << 20)
+#define SC_YSEL_MASK (0x3 << 20)
+#define SE_MAP(x) ((x) << 24)
+#define SE_MAP_MASK (0x3 << 24)
+#define SE_XSEL(x) ((x) << 26)
+#define SE_XSEL_MASK (0x3 << 26)
+#define SE_YSEL(x) ((x) << 28)
+#define SE_YSEL_MASK (0x3 << 28)
+
+/* mmPA_SC_RASTER_CONFIG_1 mask */
+#define SE_PAIR_MAP(x) ((x) << 0)
+#define SE_PAIR_MAP_MASK (0x3 << 0)
+#define SE_PAIR_XSEL(x) ((x) << 2)
+#define SE_PAIR_XSEL_MASK (0x3 << 2)
+#define SE_PAIR_YSEL(x) ((x) << 4)
+#define SE_PAIR_YSEL_MASK (0x3 << 4)
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
index 8c5608a4d526..c57eff159374 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/si/sid.h
@@ -1398,10 +1398,45 @@
#define DB_DEPTH_INFO 0xA00F
#define PA_SC_RASTER_CONFIG 0xA0D4
+# define RB_MAP_PKR0(x) ((x) << 0)
+# define RB_MAP_PKR0_MASK (0x3 << 0)
+# define RB_MAP_PKR1(x) ((x) << 2)
+# define RB_MAP_PKR1_MASK (0x3 << 2)
# define RASTER_CONFIG_RB_MAP_0 0
# define RASTER_CONFIG_RB_MAP_1 1
# define RASTER_CONFIG_RB_MAP_2 2
# define RASTER_CONFIG_RB_MAP_3 3
+# define RB_XSEL2(x) ((x) << 4)
+# define RB_XSEL2_MASK (0x3 << 4)
+# define RB_XSEL (1 << 6)
+# define RB_YSEL (1 << 7)
+# define PKR_MAP(x) ((x) << 8)
+# define PKR_MAP_MASK (0x3 << 8)
+# define RASTER_CONFIG_PKR_MAP_0 0
+# define RASTER_CONFIG_PKR_MAP_1 1
+# define RASTER_CONFIG_PKR_MAP_2 2
+# define RASTER_CONFIG_PKR_MAP_3 3
+# define PKR_XSEL(x) ((x) << 10)
+# define PKR_XSEL_MASK (0x3 << 10)
+# define PKR_YSEL(x) ((x) << 12)
+# define PKR_YSEL_MASK (0x3 << 12)
+# define SC_MAP(x) ((x) << 16)
+# define SC_MAP_MASK (0x3 << 16)
+# define SC_XSEL(x) ((x) << 18)
+# define SC_XSEL_MASK (0x3 << 18)
+# define SC_YSEL(x) ((x) << 20)
+# define SC_YSEL_MASK (0x3 << 20)
+# define SE_MAP(x) ((x) << 24)
+# define SE_MAP_MASK (0x3 << 24)
+# define RASTER_CONFIG_SE_MAP_0 0
+# define RASTER_CONFIG_SE_MAP_1 1
+# define RASTER_CONFIG_SE_MAP_2 2
+# define RASTER_CONFIG_SE_MAP_3 3
+# define SE_XSEL(x) ((x) << 26)
+# define SE_XSEL_MASK (0x3 << 26)
+# define SE_YSEL(x) ((x) << 28)
+# define SE_YSEL_MASK (0x3 << 28)
+
#define VGT_EVENT_INITIATOR 0xA2A4
# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 6aa8938fd826..df7c18b6a02a 100644..100755
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -161,6 +161,7 @@ struct cgs_clock_limits {
*/
struct cgs_firmware_info {
uint16_t version;
+ uint16_t fw_version;
uint16_t feature_version;
uint32_t image_size;
uint64_t mc_addr;
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index b1d19409bf86..7174f7a68266 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -191,11 +191,9 @@ static int pp_sw_reset(void *handle)
}
-static int pp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+int amd_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
struct pp_hwmgr *hwmgr;
- uint32_t msg_id, pp_state;
if (handle == NULL)
return -EINVAL;
@@ -209,76 +207,7 @@ static int pp_set_clockgating_state(void *handle,
return 0;
}
- if (state == AMD_CG_STATE_UNGATE)
- pp_state = 0;
- else
- pp_state = PP_STATE_CG | PP_STATE_LS;
-
- /* Enable/disable GFX blocks clock gating through SMU */
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_3D,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_RLC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_CP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_GFX,
- PP_BLOCK_GFX_MG,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
- /* Enable/disable System blocks clock gating through SMU */
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_BIF,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_MC,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_ROM,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_DRM,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_HDP,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
- msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
- PP_BLOCK_SYS_SDMA,
- PP_STATE_SUPPORT_CG | PP_STATE_SUPPORT_LS,
- pp_state);
- hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
-
- return 0;
+ return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}
static int pp_set_powergating_state(void *handle,
@@ -362,7 +291,7 @@ const struct amd_ip_funcs pp_ip_funcs = {
.is_idle = pp_is_idle,
.wait_for_idle = pp_wait_for_idle,
.soft_reset = pp_sw_reset,
- .set_clockgating_state = pp_set_clockgating_state,
+ .set_clockgating_state = NULL,
.set_powergating_state = pp_set_powergating_state,
};
@@ -576,28 +505,6 @@ enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
}
}
-static void
-pp_debugfs_print_current_performance_level(void *handle,
- struct seq_file *m)
-{
- struct pp_hwmgr *hwmgr;
-
- if (handle == NULL)
- return;
-
- hwmgr = ((struct pp_instance *)handle)->hwmgr;
-
- if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
- return;
-
- if (hwmgr->hwmgr_func->print_current_perforce_level == NULL) {
- printk(KERN_INFO "%s was not implemented.\n", __func__);
- return;
- }
-
- hwmgr->hwmgr_func->print_current_perforce_level(hwmgr, m);
-}
-
static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr;
@@ -894,6 +801,25 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
+static int pp_dpm_read_sensor(void *handle, int idx, int32_t *value)
+{
+ struct pp_hwmgr *hwmgr;
+
+ if (!handle)
+ return -EINVAL;
+
+ hwmgr = ((struct pp_instance *)handle)->hwmgr;
+
+ PP_CHECK_HW(hwmgr);
+
+ if (hwmgr->hwmgr_func->read_sensor == NULL) {
+ printk(KERN_INFO "%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value);
+}
+
const struct amd_powerplay_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
@@ -906,7 +832,6 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.powergate_vce = pp_dpm_powergate_vce,
.powergate_uvd = pp_dpm_powergate_uvd,
.dispatch_tasks = pp_dpm_dispatch_tasks,
- .print_current_performance_level = pp_debugfs_print_current_performance_level,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
.set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
@@ -920,6 +845,7 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
.set_sclk_od = pp_dpm_set_sclk_od,
.get_mclk_od = pp_dpm_get_mclk_od,
.set_mclk_od = pp_dpm_set_mclk_od,
+ .read_sensor = pp_dpm_read_sensor,
};
static int amd_pp_instance_init(struct amd_pp_init *pp_init,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 635fc4b48184..92b117843875 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -262,6 +262,8 @@ static const pem_event_action * const display_config_change_event[] = {
unblock_adjust_power_state_tasks,
set_cpu_power_state,
notify_hw_power_source_tasks,
+ get_2d_performance_state_tasks,
+ set_performance_state_tasks,
/* updateDALConfigurationTasks,
variBrightDisplayConfigurationChangeTasks, */
adjust_power_state_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
index 1d1875a7cb2d..489908887e9c 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
@@ -101,11 +101,12 @@ int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
if (requested == NULL)
return 0;
+ phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+
if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
equal = false;
if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
- phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index 6e359c90dfda..5fff1d636ab7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -3,16 +3,12 @@
# It provides the hardware management services for the driver.
HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
- hardwaremanager.o pp_acpi.o cz_hwmgr.o \
- cz_clockpowergating.o tonga_powertune.o\
- process_pptables_v1_0.o ppatomctrl.o \
- tonga_hwmgr.o pppcielanes.o tonga_thermal.o\
- fiji_powertune.o fiji_hwmgr.o tonga_clockpowergating.o \
- fiji_clockpowergating.o fiji_thermal.o \
- polaris10_hwmgr.o polaris10_powertune.o polaris10_thermal.o \
- polaris10_clockpowergating.o iceland_hwmgr.o \
- iceland_clockpowergating.o iceland_thermal.o \
- iceland_powertune.o
+ hardwaremanager.o pp_acpi.o cz_hwmgr.o \
+ cz_clockpowergating.o pppcielanes.o\
+ process_pptables_v1_0.o ppatomctrl.o \
+ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
+ smu7_clockpowergating.o
+
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 5ecef1732e20..7e4fcbbbe086 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1538,78 +1538,6 @@ int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
return sizeof(struct cz_power_state);
}
-static void
-cz_print_current_perforce_level(struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
-
- struct phm_clock_voltage_dependency_table *table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
-
- struct phm_vce_clock_voltage_dependency_table *vce_table =
- hwmgr->dyn_state.vce_clock_voltage_dependency_table;
-
- struct phm_uvd_clock_voltage_dependency_table *uvd_table =
- hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
-
- uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
- TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
- uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
- TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
- uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
- TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
-
- uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
- uint16_t vddnb, vddgfx;
- int result;
-
- if (sclk_index >= NUM_SCLK_LEVELS) {
- seq_printf(m, "\n invalid sclk dpm profile %d\n", sclk_index);
- } else {
- sclk = table->entries[sclk_index].clk;
- seq_printf(m, "\n index: %u sclk: %u MHz\n", sclk_index, sclk/100);
- }
-
- tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
- CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
- tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
- CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
- seq_printf(m, "\n vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
-
- seq_printf(m, "\n uvd %sabled\n", cz_hwmgr->uvd_power_gated ? "dis" : "en");
- if (!cz_hwmgr->uvd_power_gated) {
- if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
- seq_printf(m, "\n invalid uvd dpm level %d\n", uvd_index);
- } else {
- vclk = uvd_table->entries[uvd_index].vclk;
- dclk = uvd_table->entries[uvd_index].dclk;
- seq_printf(m, "\n index: %u uvd vclk: %u MHz dclk: %u MHz\n", uvd_index, vclk/100, dclk/100);
- }
- }
-
- seq_printf(m, "\n vce %sabled\n", cz_hwmgr->vce_power_gated ? "dis" : "en");
- if (!cz_hwmgr->vce_power_gated) {
- if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
- seq_printf(m, "\n invalid vce dpm level %d\n", vce_index);
- } else {
- ecclk = vce_table->entries[vce_index].ecclk;
- seq_printf(m, "\n index: %u vce ecclk: %u MHz\n", vce_index, ecclk/100);
- }
- }
-
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
- if (0 == result) {
- activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
- activity_percent = activity_percent > 100 ? 100 : activity_percent;
- } else {
- activity_percent = 50;
- }
-
- seq_printf(m, "\n [GPU load]: %u %%\n\n", activity_percent);
-}
-
static void cz_hw_print_display_cfg(
const struct cc6_settings *cc6_settings)
{
@@ -1857,6 +1785,107 @@ static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_c
return 0;
}
+static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx, int32_t *value)
+{
+ struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+ uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
+ TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+ uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+ uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+ TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+
+ uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
+ uint16_t vddnb, vddgfx;
+ int result;
+
+ switch (idx) {
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ if (sclk_index < NUM_SCLK_LEVELS) {
+ sclk = table->entries[sclk_index].clk;
+ *value = sclk;
+ return 0;
+ }
+ return -EINVAL;
+ case AMDGPU_PP_SENSOR_VDDNB:
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
+ CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+ vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
+ *value = vddnb;
+ return 0;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
+ CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
+ vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+ *value = vddgfx;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_VCLK:
+ if (!cz_hwmgr->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ return -EINVAL;
+ } else {
+ vclk = uvd_table->entries[uvd_index].vclk;
+ *value = vclk;
+ return 0;
+ }
+ }
+ *value = 0;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_DCLK:
+ if (!cz_hwmgr->uvd_power_gated) {
+ if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ return -EINVAL;
+ } else {
+ dclk = uvd_table->entries[uvd_index].dclk;
+ *value = dclk;
+ return 0;
+ }
+ }
+ *value = 0;
+ return 0;
+ case AMDGPU_PP_SENSOR_VCE_ECCLK:
+ if (!cz_hwmgr->vce_power_gated) {
+ if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
+ return -EINVAL;
+ } else {
+ ecclk = vce_table->entries[vce_index].ecclk;
+ *value = ecclk;
+ return 0;
+ }
+ }
+ *value = 0;
+ return 0;
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
+ if (0 == result) {
+ activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
+ activity_percent = activity_percent > 100 ? 100 : activity_percent;
+ } else {
+ activity_percent = 50;
+ }
+ *value = activity_percent;
+ return 0;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *value = cz_hwmgr->uvd_power_gated ? 0 : 1;
+ return 0;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *value = cz_hwmgr->vce_power_gated ? 0 : 1;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init,
.backend_fini = cz_hwmgr_backend_fini,
@@ -1872,7 +1901,6 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.patch_boot_state = cz_dpm_patch_boot_state,
.get_pp_table_entry = cz_dpm_get_pp_table_entry,
.get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
- .print_current_perforce_level = cz_print_current_perforce_level,
.set_cpu_power_state = cz_set_cpu_power_state,
.store_cc6_data = cz_store_cc6_data,
.force_clock_level = cz_force_clock_level,
@@ -1882,6 +1910,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
.get_clock_by_type = cz_get_clock_by_type,
.get_max_high_clocks = cz_get_max_high_clocks,
+ .read_sensor = cz_read_sensor,
};
int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
deleted file mode 100644
index 5afe82068b29..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "fiji_clockpowergating.h"
-#include "fiji_ppsmc.h"
-#include "fiji_hwmgr.h"
-
-int fiji_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
- data->acp_power_gated = false;
-
- return 0;
-}
-
-int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->uvd_power_gated == bgate)
- return 0;
-
- data->uvd_power_gated = bgate;
-
- if (bgate) {
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
- fiji_update_uvd_dpm(hwmgr, true);
- } else {
- fiji_update_uvd_dpm(hwmgr, false);
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- }
-
- return 0;
-}
-
-int fiji_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_set_power_state_input states;
- const struct pp_power_state *pcurrent;
- struct pp_power_state *requested;
-
- if (data->vce_power_gated == bgate)
- return 0;
-
- data->vce_power_gated = bgate;
-
- pcurrent = hwmgr->current_ps;
- requested = hwmgr->request_ps;
-
- states.pcurrent_state = &(pcurrent->hardware);
- states.pnew_state = &(requested->hardware);
-
- fiji_update_vce_dpm(hwmgr, &states);
- fiji_enable_disable_vce_dpm(hwmgr, !bgate);
-
- return 0;
-}
-
-int fiji_phm_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->samu_power_gated == bgate)
- return 0;
-
- data->samu_power_gated = bgate;
-
- if (bgate)
- fiji_update_samu_dpm(hwmgr, true);
- else
- fiji_update_samu_dpm(hwmgr, false);
-
- return 0;
-}
-
-int fiji_phm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->acp_power_gated == bgate)
- return 0;
-
- data->acp_power_gated = bgate;
-
- if (bgate)
- fiji_update_acp_dpm(hwmgr, true);
- else
- fiji_update_acp_dpm(hwmgr, false);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
deleted file mode 100644
index 32d43e8fecb2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_dyn_defaults.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_DYN_DEFAULTS_H
-#define FIJI_DYN_DEFAULTS_H
-
-/** \file
-* Volcanic Islands Dynamic default parameters.
-*/
-
-enum FIJIdpm_TrendDetection
-{
- FIJIAdpm_TrendDetection_AUTO,
- FIJIAdpm_TrendDetection_UP,
- FIJIAdpm_TrendDetection_DOWN
-};
-typedef enum FIJIdpm_TrendDetection FIJIdpm_TrendDetection;
-
-/* We still need to fill in the default values. */
-
-/* Bit vector representing same fields as hardware register. */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 /* CP_Gfx_busy ????
- * HDP_busy
- * IH_busy
- * UVD_busy
- * VCE_busy
- * ACP_busy
- * SAMU_busy
- * SDMA enabled */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1 0x000400 /* FE_Gfx_busy - Intended for primary usage. Rest are for flexibility. ????
- * SH_Gfx_busy
- * RB_Gfx_busy
- * VCE_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080 /* SH_Gfx_busy - Intended for primary usage. Rest are for flexibility.
- * FE_Gfx_busy
- * RB_Gfx_busy
- * ACP_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200 /* RB_Gfx_busy - Intended for primary usage. Rest are for flexibility.
- * FE_Gfx_busy
- * SH_Gfx_busy
- * UVD_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680 /* UVD_busy
- * VCE_busy
- * ACP_busy
- * SAMU_busy */
-
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033 /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033 /* GFX, HDP */
-#define PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000 /* GFX, HDP */
-
-
-/* thermal protection counter (units). */
-#define PPFIJI_THERMALPROTECTCOUNTER_DFLT 0x200 /* ~19us */
-
-/* static screen threshold unit */
-#define PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT 0
-
-/* static screen threshold */
-#define PPFIJI_STATICSCREENTHRESHOLD_DFLT 0x00C8
-
-/* gfx idle clock stop threshold */
-#define PPFIJI_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200 /* ~19us with static screen threshold unit of 0 */
-
-/* Fixed reference divider to use when building baby stepping tables. */
-#define PPFIJI_REFERENCEDIVIDER_DFLT 4
-
-/* ULV voltage change delay time
- * Used to be delay_vreg in N.I. split for S.I.
- * Using N.I. delay_vreg value as default
- * ReferenceClock = 2700
- * VoltageResponseTime = 1000
- * VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600 = 1687
- */
-#define PPFIJI_ULVVOLTAGECHANGEDELAY_DFLT 1687
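/* Editorial note (not part of the deleted header): a minimal check of the
 * delay arithmetic quoted in the comment above, using the stated values:
 *   VDDCDelayTime = (VoltageResponseTime * ReferenceClock) / 1600
 *                 = (1000 * 2700) / 1600
 *                 = 1687 (integer truncation of 1687.5)
 */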
-
-#define PPFIJI_CGULVPARAMETER_DFLT 0x00040035
-#define PPFIJI_CGULVCONTROL_DFLT 0x00007450
-#define PPFIJI_TARGETACTIVITY_DFLT 30 /* 30%*/
-#define PPFIJI_MCLK_TARGETACTIVITY_DFLT 10 /* 10% */
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
deleted file mode 100644
index 74300d6ef686..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ /dev/null
@@ -1,5600 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "linux/delay.h"
-
-#include "hwmgr.h"
-#include "fiji_smumgr.h"
-#include "atombios.h"
-#include "hardwaremanager.h"
-#include "ppatomctrl.h"
-#include "atombios.h"
-#include "cgs_common.h"
-#include "fiji_dyn_defaults.h"
-#include "fiji_powertune.h"
-#include "smu73.h"
-#include "smu/smu_7_1_3_d.h"
-#include "smu/smu_7_1_3_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-#include "dce/dce_10_0_d.h"
-#include "dce/dce_10_0_sh_mask.h"
-#include "pppcielanes.h"
-#include "fiji_hwmgr.h"
-#include "process_pptables_v1_0.h"
-#include "pptable_v1_0.h"
-#include "pp_debug.h"
-#include "pp_acpi.h"
-#include "amd_pcie_helpers.h"
-#include "cgs_linux.h"
-#include "ppinterrupt.h"
-
-#include "fiji_clockpowergating.h"
-#include "fiji_thermal.h"
-
-#define VOLTAGE_SCALE 4
-#define SMC_RAM_END 0x40000
-#define VDDC_VDDCI_DELTA 300
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-#define MC_CG_ARB_FREQ_F0 0x0a /* boot-up default */
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-/* From smc_reg.h */
-#define SMC_CG_IND_START 0xc0030000
-#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND */
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-#define VDDC_VDDCI_DELTA 300
-
-#define ixSWRST_COMMAND_1 0x1400103
-#define MC_SEQ_CNTL__CAC_EN_MASK 0x40000000
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
- DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
- DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
- DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
- DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
- DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
-};
-
-
-/* [2.5%,~2.5%] Clock stretched is multiple of 2.5% vs
- * not and [Fmin, Fmax, LDO_REFSEL, USE_FOR_LOW_FREQ]
- */
-static const uint16_t fiji_clock_stretcher_lookup_table[2][4] =
-{ {600, 1050, 3, 0}, {600, 1050, 6, 1} };
-
-/* [FF, SS] type, [] 4 voltage ranges, and
- * [Floor Freq, Boundary Freq, VID min , VID max]
- */
-static const uint32_t fiji_clock_stretcher_ddt_table[2][4][4] =
-{ { {265, 529, 120, 128}, {325, 650, 96, 119}, {430, 860, 32, 95}, {0, 0, 0, 31} },
- { {275, 550, 104, 112}, {319, 638, 96, 103}, {360, 720, 64, 95}, {384, 768, 32, 63} } };
-
-/* [Use_For_Low_freq] value, [0%, 5%, 10%, 7.14%, 14.28%, 20%]
- * (coming from PWR_CKS_CNTL.stretch_amount reg spec)
- */
-static const uint8_t fiji_clock_stretch_amount_conversion[2][6] =
-{ {0, 1, 3, 2, 4, 5}, {0, 2, 4, 5, 6, 5} };
-
-static const unsigned long PhwFiji_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-static struct fiji_power_state *cast_phw_fiji_power_state(
- struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL;);
-
- return (struct fiji_power_state *)hw_ps;
-}
-
-static const struct
-fiji_power_state *cast_const_phw_fiji_power_state(
- const struct pp_hw_power_state *hw_ps)
-{
- PP_ASSERT_WITH_CODE((PhwFiji_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL;);
-
- return (const struct fiji_power_state *)hw_ps;
-}
-
-static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
-}
-
-static void fiji_init_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_ulv_parm *ulv = &data->ulv;
-
- ulv->cg_ulv_parameter = PPFIJI_CGULVPARAMETER_DFLT;
- data->voting_rights_clients0 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = PPFIJI_VOTINGRIGHTSCLIENTS_DFLT7;
-
- data->static_screen_threshold_unit =
- PPFIJI_STATICSCREENTHRESHOLDUNIT_DFLT;
- data->static_screen_threshold =
- PPFIJI_STATICSCREENTHRESHOLD_DFLT;
-
- /* Unset the ABM cap, as ABM moved to DAL.
- * Set PHM_PlatformCaps_NonABMSupportInPPLib
- * to redirect ABM-related requests to DAL.
- */
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ABM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_NonABMSupportInPPLib);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicACTiming);
-
- fiji_initialize_power_tune_defaults(hwmgr);
-
- data->mclk_stutter_mode_threshold = 60000;
- data->pcie_gen_performance.max = PP_PCIEGen1;
- data->pcie_gen_performance.min = PP_PCIEGen3;
- data->pcie_gen_power_saving.max = PP_PCIEGen1;
- data->pcie_gen_power_saving.min = PP_PCIEGen3;
- data->pcie_lane_performance.max = 0;
- data->pcie_lane_performance.min = 16;
- data->pcie_lane_power_saving.max = 0;
- data->pcie_lane_power_saving.min = 16;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-}
-
-static int fiji_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- uint16_t virtual_voltage_id, int32_t *sclk)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
- for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) {
- voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd;
- if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id)
- break;
- }
-
- PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count,
- "Can't find requested voltage id in vdd_dep_on_sclk table!",
- return -EINVAL;
- );
-
- *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk;
-
- return 0;
-}
-
-/**
-* Get Leakage VDDC based on leakage ID.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_get_evv_voltages(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint16_t vv_id;
- uint16_t vddc = 0;
- uint16_t evv_default = 1150;
- uint16_t i, j;
- uint32_t sclk = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- int result;
-
- for (i = 0; i < FIJI_MAX_LEAKAGE_COUNT; i++) {
- vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
- if (!fiji_get_sclk_for_voltage_evv(hwmgr,
- table_info->vddc_lookup_table, vv_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- for (j = 1; j < sclk_table->count; j++) {
- if (sclk_table->entries[j].clk == sclk &&
- sclk_table->entries[j].cks_enable == 0) {
- sclk += 5000;
- break;
- }
- }
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableDriverEVV))
- result = atomctrl_calculate_voltage_evv_on_sclk(hwmgr,
- VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc, i, true);
- else
- result = -EINVAL;
-
- if (result)
- result = atomctrl_get_voltage_evv_on_sclk(hwmgr,
- VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc);
-
- /* need to make sure vddc is less than 2V, or else it could burn the ASIC */
- PP_ASSERT_WITH_CODE((vddc < 2000),
- "Invalid VDDC value, greater than 2V!", result = -EINVAL;);
-
- if (result)
- /* 1.15V is the default safe value for Fiji */
- vddc = evv_default;
-
- /* the voltage should not be zero nor equal to leakage ID */
- if (vddc != 0 && vddc != vv_id) {
- data->vddc_leakage.actual_voltage
- [data->vddc_leakage.count] = vddc;
- data->vddc_leakage.leakage_id
- [data->vddc_leakage.count] = vv_id;
- data->vddc_leakage.count++;
- }
- }
- }
- return 0;
-}
-
-/**
- * Change virtual leakage voltage to actual value.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param voltage pointer to the voltage being patched
- * @param leakage_table pointer to the leakage table
- */
-static void fiji_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
- uint16_t *voltage, struct fiji_leakage_voltage *leakage_table)
-{
- uint32_t index;
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 */
- for (index = 0; index < leakage_table->count; index++) {
- /* if this voltage matches a leakage voltage ID */
- /* patch with actual leakage voltage */
- if (leakage_table->leakage_id[index] == *voltage) {
- *voltage = leakage_table->actual_voltage[index];
- break;
- }
- }
-
- if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- printk(KERN_ERR "Voltage value looks like a Leakage ID but it's not patched \n");
-}
-
-/**
-* Patch voltage lookup table by EVV leakages.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param lookup_table pointer to the voltage lookup table
-* @param leakage_table pointer to the leakage table
-* @return always 0
-*/
-static int fiji_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table,
- struct fiji_leakage_voltage *leakage_table)
-{
- uint32_t i;
-
- for (i = 0; i < lookup_table->count; i++)
- fiji_patch_with_vdd_leakage(hwmgr,
- &lookup_table->entries[i].us_vdd, leakage_table);
-
- return 0;
-}
-
-static int fiji_patch_clock_voltage_limits_with_vddc_leakage(
- struct pp_hwmgr *hwmgr, struct fiji_leakage_voltage *leakage_table,
- uint16_t *vddc)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- fiji_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
- hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
- table_info->max_clock_voltage_on_dc.vddc;
- return 0;
-}
-
-static int fiji_patch_voltage_dependency_tables_with_lookup_table(
- struct pp_hwmgr *hwmgr)
-{
- uint8_t entryId;
- uint8_t voltageId;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
- table_info->vdd_dep_on_mclk;
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
-
- for (entryId = 0; entryId < sclk_table->count; ++entryId) {
- voltageId = sclk_table->entries[entryId].vddInd;
- sclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mclk_table->count; ++entryId) {
- voltageId = mclk_table->entries[entryId].vddInd;
- mclk_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- for (entryId = 0; entryId < mm_table->count; ++entryId) {
- voltageId = mm_table->entries[entryId].vddcInd;
- mm_table->entries[entryId].vddc =
- table_info->vddc_lookup_table->entries[voltageId].us_vdd;
- }
-
- return 0;
-
-}
-
-static int fiji_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage. */
- return 0;
-}
-
-static int fiji_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
-{
- /* Need to determine if we need calculated voltage from mm table. */
- return 0;
-}
-
-static int fiji_sort_lookup_table(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- uint32_t table_size, i, j;
- struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
- table_size = lookup_table->count;
-
- PP_ASSERT_WITH_CODE(0 != lookup_table->count,
- "Lookup table is empty", return -EINVAL);
-
- /* Sorting voltages */
- for (i = 0; i < table_size - 1; i++) {
- for (j = i + 1; j > 0; j--) {
- if (lookup_table->entries[j].us_vdd <
- lookup_table->entries[j - 1].us_vdd) {
- tmp_voltage_lookup_record = lookup_table->entries[j - 1];
- lookup_table->entries[j - 1] = lookup_table->entries[j];
- lookup_table->entries[j] = tmp_voltage_lookup_record;
- }
- }
- }
-
- return 0;
-}
-
-static int fiji_complete_dependency_tables(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- int tmp_result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- tmp_result = fiji_patch_lookup_table_with_leakage(hwmgr,
- table_info->vddc_lookup_table, &(data->vddc_leakage));
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
- &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_calc_voltage_dependency_tables(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_calc_mm_voltage_dependency_table(hwmgr);
- if (tmp_result)
- result = tmp_result;
-
- tmp_result = fiji_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
- if (tmp_result)
- result = tmp_result;
-
- return result;
-}
-
-static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
- table_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
- "VDD dependency on SCLK table is missing. \
- This table is mandatory", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
- "VDD dependency on SCLK table has to have is missing. \
- This table is mandatory", return -EINVAL);
-
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
- "VDD dependency on MCLK table is missing. \
- This table is mandatory", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
- "VDD dependency on MCLK table has to have is missing. \
- This table is mandatory", return -EINVAL);
-
- data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->entries[0].vddc;
- data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vdd_table->
- entries[allowed_sclk_vdd_table->count - 1].vddc;
-
- table_info->max_clock_voltage_on_ac.sclk =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.mclk =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
- table_info->max_clock_voltage_on_ac.vddc =
- allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
- table_info->max_clock_voltage_on_ac.vddci =
- allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
-
- hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
- table_info->max_clock_voltage_on_ac.sclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
- table_info->max_clock_voltage_on_ac.mclk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
- table_info->max_clock_voltage_on_ac.vddc;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
- table_info->max_clock_voltage_on_ac.vddci;
-
- return 0;
-}
-
-static uint16_t fiji_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
- uint32_t speedCntl = 0;
-
- /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
- speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
- return((uint16_t)PHM_GET_FIELD(speedCntl,
- PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int fiji_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
-{
- uint32_t link_width;
-
- /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
- link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
-
- PP_ASSERT_WITH_CODE((7 >= link_width),
- "Invalid PCIe lane width!", return 0);
-
- return decode_pcie_lane_width(link_width);
-}
-
-/** Patch the Boot State to match VBIOS boot clocks and voltage.
-*
-* @param hwmgr Pointer to the hardware manager.
-* @param pPowerState The address of the PowerState instance being created.
-*
-*/
-static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
- struct pp_hw_power_state *hw_ps)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_power_state *ps = (struct fiji_power_state *)hw_ps;
- ATOM_FIRMWARE_INFO_V2_2 *fw_info;
- uint16_t size;
- uint8_t frev, crev;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-
- /* First retrieve the Boot clocks and VDDC from the firmware info table.
- * We assume here that fw_info is unchanged if this call fails.
- */
- fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table(
- hwmgr->device, index,
- &size, &frev, &crev);
- if (!fw_info)
- /* During a test, there is no firmware info table. */
- return 0;
-
- /* Patch the state. */
- data->vbios_boot_state.sclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultEngineClock);
- data->vbios_boot_state.mclk_bootup_value =
- le32_to_cpu(fw_info->ulDefaultMemoryClock);
- data->vbios_boot_state.mvdd_bootup_value =
- le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
- data->vbios_boot_state.vddc_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCVoltage);
- data->vbios_boot_state.vddci_bootup_value =
- le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
- data->vbios_boot_state.pcie_gen_bootup_value =
- fiji_get_current_pcie_speed(hwmgr);
- data->vbios_boot_state.pcie_lane_bootup_value =
- (uint16_t)fiji_get_current_pcie_lane_number(hwmgr);
-
- /* set boot power state */
- ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
- ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
- ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
- ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
-
- return 0;
-}
-
-static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- return phm_hwmgr_backend_fini(hwmgr);
-}
-
-static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data;
- uint32_t i;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- bool stay_in_boot;
- int result;
-
- data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- hwmgr->backend = data;
-
- data->dll_default_on = false;
- data->sram_end = SMC_RAM_END;
-
- for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
- data->activity_target[i] = FIJI_AT_DFLT;
-
- data->vddc_vddci_delta = VDDC_VDDCI_DELTA;
-
- data->mclk_activity_target = PPFIJI_MCLK_TARGETACTIVITY_DFLT;
- data->mclk_dpm0_activity_target = 0xa;
-
- data->sclk_dpm_key_disabled = 0;
- data->mclk_dpm_key_disabled = 0;
- data->pcie_dpm_key_disabled = 0;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UnTabledHardwareInterface);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- data->gpio_debug = 0;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPatchPowerState);
-
- /* need to set voltage control types before EVV patching */
- data->voltage_control = FIJI_VOLTAGE_CONTROL_NONE;
- data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
- data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
-
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
- data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl))
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
- data->mvdd_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
-
- if (data->mvdd_control == FIJI_VOLTAGE_CONTROL_NONE)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
- data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
- data->vddci_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (data->vddci_control == FIJI_VOLTAGE_CONTROL_NONE)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI);
-
- if (table_info && table_info->cac_dtp_table->usClockStretchAmount)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
-
- fiji_init_dpm_defaults(hwmgr);
-
- /* Get leakage voltage based on leakage ID. */
- fiji_get_evv_voltages(hwmgr);
-
- /* Patch our voltage dependency table with actual leakage voltage
- * We need to perform leakage translation before it's used by other functions
- */
- fiji_complete_dependency_tables(hwmgr);
-
- /* Parse pptable data read from VBIOS */
- fiji_set_private_data_based_on_pptable(hwmgr);
-
- /* ULV Support */
- data->ulv.ulv_supported = true; /* ULV feature is enabled by default */
-
- /* Initialize Dynamic State Adjustment Rule Settings */
- result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
-
- if (!result) {
- data->uvd_enabled = false;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableSMU7ThermalManagement);
- data->vddc_phase_shed_control = false;
- }
-
- stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StayInBootState);
-
- if (0 == result) {
- struct cgs_system_info sys_info = {0};
-
- data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- FIJI_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
- if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp &&
- hwmgr->thermal_controller.
- advanceFanControlParameters.ucFanControlMode) {
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
- hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
- table_info->cac_dtp_table->usOperatingTempMinLimit;
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
- table_info->cac_dtp_table->usOperatingTempMaxLimit;
- hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
- table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
- hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
- table_info->cac_dtp_table->usOperatingTempStep;
- hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
- table_info->cac_dtp_table->usTargetOperatingTemp;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport);
- }
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
- else
- data->pcie_gen_cap = (uint32_t)sys_info.value;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
- else
- data->pcie_lane_cap = (uint32_t)sys_info.value;
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- fiji_hwmgr_backend_fini(hwmgr);
- }
-
- return 0;
-}
-
-/**
- * Read clock related registers.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL_2);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL_3);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_FUNC_CNTL_4);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_SPREAD_SPECTRUM);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_SPLL_SPREAD_SPECTRUM_2);
-
- return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_get_memory_type(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
- data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
-
- return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
- return 0;
-}
-
-/**
- * Initialize PowerGating States for different engines
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int fiji_init_power_gate_state(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
- data->samu_power_gated = false;
- data->acp_power_gated = false;
- data->pg_acp_init = true;
-
- return 0;
-}
-
-static int fiji_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- data->low_sclk_interrupt_threshold = 0;
-
- return 0;
-}
-
-static int fiji_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = fiji_read_clock_registers(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to read clock registers!", result = tmp_result);
-
- tmp_result = fiji_get_memory_type(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get memory type!", result = tmp_result);
-
- tmp_result = fiji_enable_acpi_power_management(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ACPI power management!", result = tmp_result);
-
- tmp_result = fiji_init_power_gate_state(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init power gate state!", result = tmp_result);
-
- tmp_result = tonga_get_mc_microcode_version(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get MC microcode version!", result = tmp_result);
-
- tmp_result = fiji_init_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init sclk threshold!", result = tmp_result);
-
- return result;
-}
-
-/**
-* Checks if we want to support voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-*/
-static bool fiji_voltage_control(const struct pp_hwmgr *hwmgr)
-{
- const struct fiji_hwmgr *data =
- (const struct fiji_hwmgr *)(hwmgr->backend);
-
- return (FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control);
-}
-
-/**
-* Enable voltage control
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
- /* enable voltage control */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
- return 0;
-}
-
-/**
-* Remove repeated voltage values and create table with unique values.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @param vol_table the pointer to changing voltage table
-* @return 0 in success
-*/
-
-static int fiji_trim_voltage_table(struct pp_hwmgr *hwmgr,
- struct pp_atomctrl_voltage_table *vol_table)
-{
- uint32_t i, j;
- uint16_t vvalue;
- bool found = false;
- struct pp_atomctrl_voltage_table *table;
-
- PP_ASSERT_WITH_CODE((NULL != vol_table),
- "Voltage Table empty.", return -EINVAL);
- table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
- GFP_KERNEL);
-
- if (NULL == table)
- return -ENOMEM;
-
- table->mask_low = vol_table->mask_low;
- table->phase_delay = vol_table->phase_delay;
-
- for (i = 0; i < vol_table->count; i++) {
- vvalue = vol_table->entries[i].value;
- found = false;
-
- for (j = 0; j < table->count; j++) {
- if (vvalue == table->entries[j].value) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- table->entries[table->count].value = vvalue;
- table->entries[table->count].smio_low =
- vol_table->entries[i].smio_low;
- table->count++;
- }
- }
-
- memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
- kfree(table);
-
- return 0;
-}
-
-static int fiji_get_svi2_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vol_table = &(data->mvdd_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].mvdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = fiji_trim_voltage_table(hwmgr, vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim MVDD table.", return result);
-
- return 0;
-}
-
-static int fiji_get_svi2_vddci_voltage_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_clock_voltage_dependency_table *dep_table)
-{
- uint32_t i;
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vol_table = &(data->vddci_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != dep_table->count),
- "Voltage Dependency Table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
- vol_table->count = dep_table->count;
-
- for (i = 0; i < dep_table->count; i++) {
- vol_table->entries[i].value = dep_table->entries[i].vddci;
- vol_table->entries[i].smio_low = 0;
- }
-
- result = fiji_trim_voltage_table(hwmgr, vol_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to trim VDDCI table.", return result);
-
- return 0;
-}
-
-static int fiji_get_svi2_vdd_voltage_table(struct pp_hwmgr *hwmgr,
- phm_ppt_v1_voltage_lookup_table *lookup_table)
-{
- int i = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vol_table = &(data->vddc_voltage_table);
-
- PP_ASSERT_WITH_CODE((0 != lookup_table->count),
- "Voltage Lookup Table empty.", return -EINVAL);
-
- vol_table->mask_low = 0;
- vol_table->phase_delay = 0;
-
- vol_table->count = lookup_table->count;
-
- for (i = 0; i < vol_table->count; i++) {
- vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
- vol_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-/* ---- Voltage Tables ----
- * If the voltage table is bigger than what fits into the
- * state table on the SMC, keep only the highest entries.
- */
-static void fiji_trim_voltage_table_to_fit_state_table(struct pp_hwmgr *hwmgr,
- uint32_t max_vol_steps, struct pp_atomctrl_voltage_table *vol_table)
-{
- unsigned int i, diff;
-
- if (vol_table->count <= max_vol_steps)
- return;
-
- diff = vol_table->count - max_vol_steps;
-
- for (i = 0; i < max_vol_steps; i++)
- vol_table->entries[i] = vol_table->entries[i + diff];
-
- vol_table->count = max_vol_steps;
-
- return;
-}
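/* Editorial sketch (not from the original file): a worked example of the trim
 * above. With vol_table->count = 5 and max_vol_steps = 3, diff = 2, so the
 * surviving entries are the original entries[2..4] (the highest voltages),
 * copied down to entries[0..2], and count becomes 3.
 */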
-
-/**
-* Create Voltage Tables.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- int result;
-
- if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
- &(data->mvdd_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve MVDD table.",
- return result);
- } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- result = fiji_get_svi2_mvdd_voltage_table(hwmgr,
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 MVDD table from dependancy table.",
- return result;);
- }
-
- if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
- &(data->vddci_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve VDDCI table.",
- return result);
- } else if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- result = fiji_get_svi2_vddci_voltage_table(hwmgr,
- table_info->vdd_dep_on_mclk);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.",
- return result);
- }
-
- if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- result = fiji_get_svi2_vdd_voltage_table(hwmgr,
- table_info->vddc_lookup_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from lookup table.",
- return result);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->vddc_voltage_table.count <= (SMU73_MAX_LEVELS_VDDC)),
- "Too many voltage values for VDDC. Trimming to fit state table.",
- fiji_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU73_MAX_LEVELS_VDDC, &(data->vddc_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->vddci_voltage_table.count <= (SMU73_MAX_LEVELS_VDDCI)),
- "Too many voltage values for VDDCI. Trimming to fit state table.",
- fiji_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU73_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table)));
-
- PP_ASSERT_WITH_CODE(
- (data->mvdd_voltage_table.count <= (SMU73_MAX_LEVELS_MVDD)),
- "Too many voltage values for MVDD. Trimming to fit state table.",
- fiji_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU73_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table)));
-
- return 0;
-}
-
-static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- /* Program additional LP registers
- * that are no longer programmed by VBIOS
- */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP,
- cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
-
- return 0;
-}
-
-/**
-* Programs static screen detection parameters
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_program_static_screen_threshold_parameters(
- struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Set static screen threshold unit */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
- data->static_screen_threshold_unit);
- /* Set static screen threshold */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
- data->static_screen_threshold);
-
- return 0;
-}
-
-/**
-* Setup display gap for glitch free memory clock switching.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
- uint32_t displayGap =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL);
-
- displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP, DISPLAY_GAP_IGNORE);
-
- displayGap = PHM_SET_FIELD(displayGap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, displayGap);
-
- return 0;
-}
-
-/**
-* Programs activity state transition voting clients
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Clear reset for voting clients before enabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
- return 0;
-}
-
-static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
-{
- /* Reset voting clients before disabling DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, 0);
-
- return 0;
-}
-
-/**
-* Get the location of various tables inside the FW image.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
- uint32_t tmp;
- int result;
- bool error = false;
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, DpmTable),
- &tmp, data->sram_end);
-
- if (0 == result)
- data->dpm_table_start = tmp;
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, SoftRegisters),
- &tmp, data->sram_end);
-
- if (!result) {
- data->soft_regs_start = tmp;
- smu_data->soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcRegisterTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->mc_reg_table_start = tmp;
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, FanTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->fan_table_start = tmp;
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
- &tmp, data->sram_end);
-
- if (!result)
- data->arb_table_start = tmp;
-
- error |= (0 != result);
-
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, Version),
- &tmp, data->sram_end);
-
- if (!result)
- hwmgr->microcode_version_info.SMC = tmp;
-
- error |= (0 != result);
-
- return error ? -1 : 0;
-}
-
-/* Copy one arb setting to another and then switch the active set.
- * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
- */
-static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
- uint32_t arb_src, uint32_t arb_dest)
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint32_t burst_time;
- uint32_t mc_cg_config;
-
- switch (arb_src) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
- break;
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
- break;
- default:
- return -EINVAL;
- }
-
- switch (arb_dest) {
- case MC_CG_ARB_FREQ_F0:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
- break;
- case MC_CG_ARB_FREQ_F1:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
- break;
- default:
- return -EINVAL;
- }
-
- mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
- mc_cg_config |= 0x0000000F;
- cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);
-
- return 0;
-}
-
-/**
-* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success
-*/
-static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
-}
-
-/**
-* Initial switch from ARB F0->F1
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-* This function is to be called from the SetPowerState table.
-*/
-static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
-{
- return fiji_copy_and_switch_arb_sets(hwmgr,
- MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
-{
- uint32_t tmp;
-
- tmp = (cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
- 0x0000ff00) >> 8;
-
- if (tmp == MC_CG_ARB_FREQ_F0)
- return 0;
-
- return fiji_copy_and_switch_arb_sets(hwmgr,
- tmp, MC_CG_ARB_FREQ_F0);
-}
-
-static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
- struct fiji_single_dpm_table *dpm_table, uint32_t count)
-{
- int i;
- PP_ASSERT_WITH_CODE(count <= MAX_REGULAR_DPM_NUMBER,
- "Fatal error, can not set up single DPM table entries "
- "to exceed max number!",);
-
- dpm_table->count = count;
- for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
- dpm_table->dpm_levels[i].enabled = false;
-
- return 0;
-}
-
-static void fiji_setup_pcie_table_entry(
- struct fiji_single_dpm_table *dpm_table,
- uint32_t index, uint32_t pcie_gen,
- uint32_t pcie_lanes)
-{
- dpm_table->dpm_levels[index].value = pcie_gen;
- dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = true;
-}
-
-static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint32_t i, max_entry;
-
- PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
- data->use_pcie_power_saving_levels), "No pcie performance levels!",
- return -EINVAL);
-
- if (data->use_pcie_performance_levels &&
- !data->use_pcie_power_saving_levels) {
- data->pcie_gen_power_saving = data->pcie_gen_performance;
- data->pcie_lane_power_saving = data->pcie_lane_performance;
- } else if (!data->use_pcie_performance_levels &&
- data->use_pcie_power_saving_levels) {
- data->pcie_gen_performance = data->pcie_gen_power_saving;
- data->pcie_lane_performance = data->pcie_lane_power_saving;
- }
-
- fiji_reset_single_dpm_table(hwmgr,
- &data->dpm_table.pcie_speed_table, SMU73_MAX_LEVELS_LINK);
-
- if (pcie_table != NULL) {
- /* max_entry is used to make sure we reserve one PCIE level
- * for boot level (fix for A+A PSPP issue).
- * If the PCIE table from the PPTable has a ULV entry + 8 entries,
- * then ignore the last entry. */
- max_entry = (SMU73_MAX_LEVELS_LINK < pcie_table->count) ?
- SMU73_MAX_LEVELS_LINK : pcie_table->count;
- for (i = 1; i < max_entry; i++) {
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- pcie_table->entries[i].gen_speed),
- get_pcie_lane_support(data->pcie_lane_cap,
- pcie_table->entries[i].lane_width));
- }
- data->dpm_table.pcie_speed_table.count = max_entry - 1;
- } else {
- /* Hardcode Pcie Table */
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- data->dpm_table.pcie_speed_table.count = 6;
- }
- /* Populate last level for boot PCIE level, but do not increment count. */
- fiji_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
- data->dpm_table.pcie_speed_table.count,
- get_pcie_gen_support(data->pcie_gen_cap,
- PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap,
- PP_Max_PCIELane));
-
- return 0;
-}
-
-/*
- * This function is to initialize all DPM state tables
- * for SMU7 based on the dependency table.
- * Dynamic state patching function will then trim these
- * state tables to the allowed range based
- * on the power policy or external client requests,
- * such as UVD request, etc.
- */
-static int fiji_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i;
-
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
- "SCLK dependency table has to have is missing. "
- "This table is mandatory",
- return -EINVAL);
-
- PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory",
- return -EINVAL);
- PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
- "MCLK dependency table has to have is missing. "
- "This table is mandatory",
- return -EINVAL);
-
- /* clear the state table to reset everything to default */
- fiji_reset_single_dpm_table(hwmgr,
- &data->dpm_table.sclk_table, SMU73_MAX_LEVELS_GRAPHICS);
- fiji_reset_single_dpm_table(hwmgr,
- &data->dpm_table.mclk_table, SMU73_MAX_LEVELS_MEMORY);
-
- /* Initialize Sclk DPM table based on allow Sclk values */
- data->dpm_table.sclk_table.count = 0;
- for (i = 0; i < dep_sclk_table->count; i++) {
- if (i == 0 || data->dpm_table.sclk_table.dpm_levels
- [data->dpm_table.sclk_table.count - 1].value !=
- dep_sclk_table->entries[i].clk) {
- data->dpm_table.sclk_table.dpm_levels
- [data->dpm_table.sclk_table.count].value =
- dep_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels
- [data->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
- data->dpm_table.sclk_table.count++;
- }
- }
-
- /* Initialize Mclk DPM table based on allow Mclk values */
- data->dpm_table.mclk_table.count = 0;
- for (i=0; i<dep_mclk_table->count; i++) {
- if ( i==0 || data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count - 1].value !=
- dep_mclk_table->entries[i].clk) {
- data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count].value =
- dep_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels
- [data->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
- data->dpm_table.mclk_table.count++;
- }
- }
-
- /* setup PCIE gen speed levels */
- fiji_setup_default_pcie_table(hwmgr);
-
- /* save a copy of the default DPM table */
- memcpy(&(data->golden_dpm_table), &(data->dpm_table),
- sizeof(struct fiji_dpm_table));
-
- return 0;
-}
-
-/**
- * @brief PhwFiji_GetVoltageOrder
- * Returns the index of the requested voltage record in the lookup table
- * @param lookup_table - lookup list to search in
- * @param voltage - voltage to look for
- * @return index of the first entry whose voltage is >= the requested value,
- *         or the last index if the request exceeds the table maximum
- */
-static uint8_t fiji_get_voltage_index(
- struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
-{
-	uint8_t count;
-	uint8_t i;
-
-	PP_ASSERT_WITH_CODE((NULL != lookup_table),
-			"Lookup Table empty.", return 0);
-
-	count = (uint8_t) (lookup_table->count);
-	PP_ASSERT_WITH_CODE((0 != count),
-			"Lookup Table empty.", return 0);
-
- for (i = 0; i < lookup_table->count; i++) {
- /* find first voltage equal or bigger than requested */
- if (lookup_table->entries[i].us_vdd >= voltage)
- return i;
- }
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
-
-/**
-* Preparation of vddc and vddgfx CAC tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- uint32_t count;
- uint8_t index;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_voltage_lookup_table *lookup_table =
- table_info->vddc_lookup_table;
-	/* The table is already swapped, so in order to use the values from it
-	 * we need to swap them back.
-	 * We are populating vddc CAC data into the BapmVddc table
-	 * in both split and merged mode.
-	 */
-	for (count = 0; count < lookup_table->count; count++) {
- index = fiji_get_voltage_index(lookup_table,
- data->vddc_voltage_table.entries[count].value);
- table->BapmVddcVidLoSidd[count] = (uint8_t) ((6200 -
- (lookup_table->entries[index].us_cac_low *
- VOLTAGE_SCALE)) / 25);
- table->BapmVddcVidHiSidd[count] = (uint8_t) ((6200 -
- (lookup_table->entries[index].us_cac_high *
- VOLTAGE_SCALE)) / 25);
- }
-
- return 0;
-}
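A quick sanity check on the VID arithmetic above, assuming VOLTAGE_SCALE is 4 (i.e. the CAC voltages are given in mV and scaled to 0.25 mV steps): a CAC voltage of 1100 mV yields (6200 - 1100 * 4) / 25 = 72, which matches the usual SVI2 encoding VID = (1550 mV - V) / 6.25 mV.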
-
-/**
-* Preparation of voltage tables for SMC.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-
-static int fiji_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result;
-
- result = fiji_populate_cac_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate CAC voltage tables to SMC",
- return -EINVAL);
-
- return 0;
-}
-
-static int fiji_populate_ulv_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_Ulv *state)
-{
- int result = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset;
- state->VddcOffsetVid = (uint8_t)( table_info->us_ulv_voltage_offset *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 );
-
- state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
- }
- return result;
-}
-
-static int fiji_populate_ulv_state(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- return fiji_populate_ulv_level(hwmgr, &table->Ulv);
-}
-
-static int32_t fiji_get_dpm_level_enable_mask_value(
- struct fiji_single_dpm_table* dpm_table)
-{
- int32_t i;
- int32_t mask = 0;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask = mask << 1;
- if (dpm_table->dpm_levels[i - 1].enabled)
- mask |= 0x1;
- else
- mask &= 0xFFFFFFFE;
- }
- return mask;
-}
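A minimal user-space sketch of the same bitmask construction, for illustration only; build_enable_mask() and the enabled[] array below are hypothetical stand-ins for dpm_table->dpm_levels and are not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for fiji_get_dpm_level_enable_mask_value():
 * builds a mask whose bit i is set when level i is enabled, walking
 * from the highest level down. */
static int32_t build_enable_mask(const int *enabled, int count)
{
	int32_t mask = 0;
	int i;

	for (i = count; i > 0; i--) {
		mask <<= 1;
		if (enabled[i - 1])
			mask |= 0x1;
	}
	return mask;
}

int main(void)
{
	int enabled[] = { 1, 0, 1, 1 };	/* levels 0, 2 and 3 enabled */

	printf("0x%x\n", build_enable_mask(enabled, 4));	/* prints 0xd */
	return 0;
}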
-
-static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- int i;
-
- /* Index (dpm_table->pcie_speed_table.count)
- * is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width(
- dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30);
- }
-
- data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-/**
-* Calculates the SCLK dividers using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-static int fiji_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_GraphicsLevel *sclk)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t ref_clock;
- uint32_t ref_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
- ref_clock = atomctrl_get_reference_clock(hwmgr);
- ref_divider = 1 + dividers.uc_pll_ref_div;
-
-	/* the low 14 bits are the fraction and the high 12 bits are the divider */
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup */
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
- SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- struct pp_atomctrl_internal_ss_info ssInfo;
-
- uint32_t vco_freq = clock * dividers.uc_pll_post_div;
- if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
- vco_freq, &ssInfo)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- *
- * clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2
- */
- uint32_t clk_s = ref_clock * 5 /
- (ref_divider * ssInfo.speed_spectrum_rate);
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clk_v = 4 * ssInfo.speed_spectrum_percentage *
- fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
- cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
- CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
- }
- }
-
- sclk->SclkFrequency = clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
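One point worth spelling out about the spread-spectrum math above: the commented formula clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 simplifies algebraically to reference_clock * 5 / ((REFDIV + 1) * speed_spectrum_rate), which is exactly the clk_s expression in the code, with ref_divider standing in for REFDIV + 1.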
-
-static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci)
-{
- uint32_t i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct pp_atomctrl_voltage_table *vddci_table =
- &(data->vddci_voltage_table);
-
- for (i = 0; i < vddci_table->count; i++) {
- if (vddci_table->entries[i].value >= vddci)
- return vddci_table->entries[i].value;
- }
-
- PP_ASSERT_WITH_CODE(false,
- "VDDCI is larger than max VDDCI in VDDCI Voltage Table!",
- return vddci_table->entries[i-1].value);
-}
-
-static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_ppt_v1_clock_voltage_dependency_table* dep_table,
- uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
-{
- uint32_t i;
- uint16_t vddci;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- *voltage = *mvdd = 0;
-
-	/* the clock-voltage dependency table is empty */
- if (dep_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < dep_table->count; i++) {
-		/* find the first sclk bigger than the requested clock */
- if (dep_table->entries[i].clk >= clock) {
- *voltage |= (dep_table->entries[i].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i].vddci)
- *voltage |= (dep_table->entries[i].vddci *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else {
- vddci = fiji_find_closest_vddci(hwmgr,
- (dep_table->entries[i].vddc -
- (uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
- else if (dep_table->entries[i].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i].mvdd *
- VOLTAGE_SCALE;
-
- *voltage |= 1 << PHASES_SHIFT;
- return 0;
- }
- }
-
-	/* sclk is bigger than the max sclk in the dependency table */
- *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
-
- if (FIJI_VOLTAGE_CONTROL_NONE == data->vddci_control)
- *voltage |= (data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- else if (dep_table->entries[i-1].vddci) {
- vddci = fiji_find_closest_vddci(hwmgr,
-				(dep_table->entries[i - 1].vddc -
- (uint16_t)data->vddc_vddci_delta));
- *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
- }
-
- if (FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control)
- *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
-	else if (dep_table->entries[i - 1].mvdd)
- *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE;
-
- return 0;
-}
-
-static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
- uint32_t clock_insr)
-{
- uint8_t i;
- uint32_t temp;
- uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
-
- PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
- for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
- temp = clock >> i;
-
- if (temp >= min || i == 0)
- break;
- }
- return i;
-}
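As a worked example (assuming FIJI_MAX_DEEPSLEEP_DIVIDER_ID is at least 4): a clock of 60000 (600 MHz in 10 kHz units) with a minimum of 5000 returns divider ID 3, since 60000 >> 3 = 7500 still meets the minimum while 60000 >> 4 = 3750 does not.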
-/**
-* Populates single SMC SCLK structure using the provided engine clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the engine clock to use to populate the structure
-* @param sclk the SMC SCLK structure to be populated
-*/
-
-static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, uint16_t sclk_al_threshold,
- struct SMU73_Discrete_GraphicsLevel *level)
-{
- int result;
- /* PP_Clocks minClocks; */
- uint32_t threshold, mvdd;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- result = fiji_calculate_sclk_params(hwmgr, clock, level);
-
- /* populate graphics levels */
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk, clock,
- &level->MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for "
- "VDDC engine clock dependency table",
- return result);
-
- level->SclkFrequency = clock;
- level->ActivityLevel = sclk_al_threshold;
- level->CcPwrDynRm = 0;
- level->CcPwrDynRm1 = 0;
- level->EnabledForActivity = 0;
- level->EnabledForThrottle = 1;
- level->UpHyst = 10;
- level->DownHyst = 0;
- level->VoltageDownHyst = 0;
- level->PowerThrottle = 0;
-
- threshold = clock * data->fast_watermark_threshold / 100;
-
-
- data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
- level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
- hwmgr->display_config.min_core_set_clock_in_sr);
-
-
- /* Default to slow, highest DPM level will be
- * set to PPSMC_DISPLAY_WATERMARK_LOW later.
- */
- level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
-
- return 0;
-}
-/**
-* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
- uint8_t pcie_entry_cnt = (uint8_t) data->dpm_table.pcie_speed_table.count;
- int result = 0;
- uint32_t array = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU73_Discrete_GraphicsLevel) *
- SMU73_MAX_LEVELS_GRAPHICS;
- struct SMU73_Discrete_GraphicsLevel *levels =
- data->smc_state_table.GraphicsLevel;
- uint32_t i, max_entry;
- uint8_t hightest_pcie_level_enabled = 0,
- lowest_pcie_level_enabled = 0,
- mid_pcie_level_enabled = 0,
- count = 0;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = fiji_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)data->activity_target[i],
- &levels[i]);
- if (result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- levels[i].DeepSleepDivId = 0;
- }
-
- /* Only enable level 0 for now.*/
- levels[0].EnabledForActivity = 1;
-
- /* set highest level watermark to high */
- levels[dpm_table->sclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- if (pcie_table != NULL) {
- PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt),
- "There must be 1 or more PCIE levels defined in PPTable.",
- return -EINVAL);
- max_entry = pcie_entry_cnt - 1;
- for (i = 0; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel =
- (uint8_t) ((i < max_entry)? i : max_entry);
- } else {
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (hightest_pcie_level_enabled + 1))) != 0 ))
- hightest_pcie_level_enabled++;
-
- while (data->dpm_level_enable_mask.pcie_dpm_enable_mask &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0 ))
- lowest_pcie_level_enabled++;
-
- while ((count < hightest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0 ))
- count++;
-
-		mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) <
- hightest_pcie_level_enabled?
- (lowest_pcie_level_enabled + 1 + count) :
- hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to hightest_pcie_level_enabled */
-		for (i = 2; i < dpm_table->sclk_table.count; i++)
- levels[i].pcieDpmLevel = hightest_pcie_level_enabled;
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled */
- levels[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled */
- levels[1].pcieDpmLevel = mid_pcie_level_enabled;
- }
- /* level count will send to smc once at init smc table and never change */
- result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
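To make the fallback path above concrete: with pcie_dpm_enable_mask = 0x7 (PCIe levels 0-2 enabled), the three loops settle on lowest_pcie_level_enabled = 0, hightest_pcie_level_enabled = 2 and mid_pcie_level_enabled = 1, so SCLK level 0 is pinned to PCIe level 0, SCLK level 1 to PCIe level 1, and every higher SCLK level to PCIe level 2.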
-
-/**
- * MCLK Frequency Ratio
- * SEQ_CG_RESP Bit[31:24] - 0x0
- * Bit[27:24] - DDR3 Frequency ratio
- * 0x0 <= 100MHz, 450 < 0x8 <= 500MHz
- * 100 < 0x1 <= 150MHz, 500 < 0x9 <= 550MHz
- * 150 < 0x2 <= 200MHz, 550 < 0xA <= 600MHz
- * 200 < 0x3 <= 250MHz, 600 < 0xB <= 650MHz
- * 250 < 0x4 <= 300MHz, 650 < 0xC <= 700MHz
- * 300 < 0x5 <= 350MHz, 700 < 0xD <= 750MHz
- * 350 < 0x6 <= 400MHz, 750 < 0xE <= 800MHz
- * 400 < 0x7 <= 450MHz, 800 < 0xF
- */
-static uint8_t fiji_get_mclk_frequency_ratio(uint32_t mem_clock)
-{
- if (mem_clock <= 10000) return 0x0;
- if (mem_clock <= 15000) return 0x1;
- if (mem_clock <= 20000) return 0x2;
- if (mem_clock <= 25000) return 0x3;
- if (mem_clock <= 30000) return 0x4;
- if (mem_clock <= 35000) return 0x5;
- if (mem_clock <= 40000) return 0x6;
- if (mem_clock <= 45000) return 0x7;
- if (mem_clock <= 50000) return 0x8;
- if (mem_clock <= 55000) return 0x9;
- if (mem_clock <= 60000) return 0xa;
- if (mem_clock <= 65000) return 0xb;
- if (mem_clock <= 70000) return 0xc;
- if (mem_clock <= 75000) return 0xd;
- if (mem_clock <= 80000) return 0xe;
- /* mem_clock > 800MHz */
- return 0xf;
-}
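Note that mem_clock is expressed in 10 kHz units, so the thresholds above line up with the MHz ranges in the preceding comment; for example, mem_clock = 48000 (480 MHz) falls in the 450-500 MHz bucket and returns 0x8.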
-
-/**
-* Populates the SMC MCLK structure using the provided memory clock
-*
-* @param hwmgr the address of the hardware manager
-* @param clock the memory clock to use to populate the structure
-* @param mclk the SMC MCLK structure to be populated
-*/
-static int fiji_calculate_mclk_params(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mclk)
-{
- struct pp_atomctrl_memory_clock_param mem_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_vi(hwmgr, clock, &mem_param);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to get Memory PLL Dividers.",);
-
-	/* Save the result data to the output memory level structure */
- mclk->MclkFrequency = clock;
- mclk->MclkDivider = (uint8_t)mem_param.mpll_post_divider;
- mclk->FreqRange = fiji_get_mclk_frequency_ratio(clock);
-
- return result;
-}
-
-static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
- uint32_t clock, struct SMU73_Discrete_MemoryLevel *mem_level)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int result = 0;
-
- if (table_info->vdd_dep_on_mclk) {
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk, clock,
- &mem_level->MinVoltage, &mem_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory "
- "VDDC voltage dependency table", return result);
- }
-
- mem_level->EnabledForThrottle = 1;
- mem_level->EnabledForActivity = 0;
- mem_level->UpHyst = 0;
- mem_level->DownHyst = 100;
- mem_level->VoltageDownHyst = 0;
- mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- mem_level->StutterEnable = false;
-
- mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
-	/* enable stutter mode if all of the following conditions apply
- * PECI_GetNumberOfActiveDisplays(hwmgr->pPECI,
- * &(data->DisplayTiming.numExistingDisplays));
- */
- data->display_timing.num_existing_displays = 1;
-
- if ((data->mclk_stutter_mode_threshold) &&
- (clock <= data->mclk_stutter_mode_threshold) &&
- (!data->is_uvd_enabled) &&
- (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
- STUTTER_ENABLE) & 0x1))
- mem_level->StutterEnable = true;
-
- result = fiji_calculate_mclk_params(hwmgr, clock, mem_level);
- if (!result) {
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage);
- }
- return result;
-}
-
-/**
-* Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
-*
-* @param hwmgr the address of the hardware manager
-*/
-static int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t array = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, MemoryLevel);
- uint32_t array_size = sizeof(SMU73_Discrete_MemoryLevel) *
- SMU73_MAX_LEVELS_MEMORY;
- struct SMU73_Discrete_MemoryLevel *levels =
- data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero",
- return -EINVAL);
- result = fiji_populate_single_memory_level(hwmgr,
- dpm_table->mclk_table.dpm_levels[i].value,
- &levels[i]);
- if (result)
- return result;
- }
-
- /* Only enable level 0 for now. */
- levels[0].EnabledForActivity = 1;
-
-	/* In order to prevent MC activity in stutter mode from pushing DPM up,
-	 * the UVD change complements this by putting the MCLK in
-	 * a higher state by default such that we are not affected by
-	 * the up threshold or MCLK DPM latency.
- */
- levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
- CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
-
- data->smc_state_table.MemoryDpmLevelCount =
- (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high */
- levels[dpm_table->mclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- /* level count will send to smc once at init smc table and never change */
- result = fiji_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
- (uint32_t)array_size, data->sram_end);
-
- return result;
-}
-
-/**
-* Populates the SMC MVDD structure using the provided memory clock.
-*
-* @param hwmgr the address of the hardware manager
-* @param mclk the MCLK value to be used in the decision if MVDD should be high or low.
-* @param smio_pat the SMIO pattern structure to be populated with the MVDD value
-*/
-static int fiji_populate_mvdd_value(struct pp_hwmgr *hwmgr,
- uint32_t mclk, SMIO_Pattern *smio_pat)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t i = 0;
-
- if (FIJI_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find the first mvdd entry whose clock is greater than or equal to the requested clock */
- for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) {
- if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) {
- smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
- PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count,
- "MVDD Voltage is outside the supported range.",
- return -EINVAL);
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_atomctrl_clock_dividers_vi dividers;
- SMIO_Pattern vol_level;
- uint32_t mvdd;
- uint16_t us_mvdd;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (!data->sclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0,
- * already converted to SMC_UL */
- table->ACPILevel.SclkFrequency =
- data->dpm_table.sclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_sclk,
- table->ACPILevel.SclkFrequency,
- &table->ACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDC voltage value "
- "in Clock Dependency Table",);
- } else {
- table->ACPILevel.SclkFrequency =
- data->vbios_boot_state.sclk_bootup_value;
- table->ACPILevel.MinVoltage =
- data->vbios_boot_state.vddc_bootup_value * VOLTAGE_SCALE;
- }
-
- /* get the engine clock dividers for this clock value */
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.",
- return result);
-
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
- SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2, CG_SPLL_FUNC_CNTL_2,
- SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- if (!data->mclk_dpm_key_disabled) {
- /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
- table->MemoryACPILevel.MclkFrequency =
- data->dpm_table.mclk_table.dpm_levels[0].value;
- result = fiji_get_dependency_volt_by_clk(hwmgr,
- table_info->vdd_dep_on_mclk,
- table->MemoryACPILevel.MclkFrequency,
- &table->MemoryACPILevel.MinVoltage, &mvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "Cannot find ACPI VDDCI voltage value "
- "in Clock Dependency Table",);
- } else {
- table->MemoryACPILevel.MclkFrequency =
- data->vbios_boot_state.mclk_bootup_value;
- table->MemoryACPILevel.MinVoltage =
- data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE;
- }
-
- us_mvdd = 0;
- if ((FIJI_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!fiji_populate_mvdd_value(hwmgr,
- data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
-
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(us_mvdd * VOLTAGE_SCALE);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- table->MemoryACPILevel.ActivityLevel =
- PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = false;
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);
-
- return result;
-}
-
-static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->VceLevelCount = (uint8_t)(mm_table->count);
- table->VceBootLevel = 0;
-
-	for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency = mm_table->entries[count].eclk;
- table->VceLevel[count].MinVoltage = 0;
- table->VceLevel[count].MinVoltage |=
- (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- table->VceLevel[count].MinVoltage |=
- ((mm_table->entries[count].vddc - data->vddc_vddci_delta) *
- VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->VceLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for VCE engine clock",
- return result);
-
- table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->AcpLevelCount = (uint8_t)(mm_table->count);
- table->AcpBootLevel = 0;
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency = mm_table->entries[count].aclk;
- table->AcpLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->AcpLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->AcpLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->AcpLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for engine clock", return result);
-
- table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->SamuBootLevel = 0;
- table->SamuLevelCount = (uint8_t)(mm_table->count);
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- /* not sure whether we need evclk or not */
- table->SamuLevel[count].MinVoltage = 0;
- table->SamuLevel[count].Frequency = mm_table->entries[count].samclock;
- table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->SamuLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->SamuLevel[count].Frequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for samu clock", return result);
-
- table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage);
- }
- return result;
-}
-
-static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
- int32_t eng_clock, int32_t mem_clock,
- struct SMU73_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- uint32_t dram_timing;
- uint32_t dram_timing2;
- uint32_t burstTime;
- ULONG state, trrds, trrdl;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- eng_clock, mem_clock);
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME);
-
- state = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, STATE0);
- trrds = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDS0);
- trrdl = PHM_GET_FIELD(burstTime, MC_ARB_BURST_TIME, TRRDL0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
- arb_regs->TRRDS = (uint8_t)trrds;
- arb_regs->TRRDL = (uint8_t)trrdl;
-
- return 0;
-}
-
-static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
- int result = 0;
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = fiji_populate_memory_timing_parameters(hwmgr,
- data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (result)
- break;
- }
- }
-
- if (!result)
- result = fiji_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU73_Discrete_MCArbDramTimingTable),
- data->sram_end);
- return result;
-}
-
-static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = -EINVAL;
- uint8_t count;
- struct pp_atomctrl_clock_dividers_vi dividers;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
- table_info->mm_dep_table;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->UvdLevelCount = (uint8_t)(mm_table->count);
- table->UvdBootLevel = 0;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].MinVoltage = 0;
- table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk;
- table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk;
- table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc *
- VOLTAGE_SCALE) << VDDC_SHIFT;
- table->UvdLevel[count].MinVoltage |= ((mm_table->entries[count].vddc -
- data->vddc_vddci_delta) * VOLTAGE_SCALE) << VDDCI_SHIFT;
- table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT;
-
-		/* retrieve divider value from VBIOS */
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].VclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Vclk clock", return result);
-
- table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
-
- result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
- table->UvdLevel[count].DclkFrequency, &dividers);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find divide id for Dclk clock", return result);
-
- table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage);
-
- }
- return result;
-}
-
-static int fiji_find_boot_level(struct fiji_single_dpm_table *table,
- uint32_t value, uint32_t *boot_level)
-{
- int result = -EINVAL;
- uint32_t i;
-
- for (i = 0; i < table->count; i++) {
- if (value == table->dpm_levels[i].value) {
- *boot_level = i;
- result = 0;
- }
- }
- return result;
-}
-
-static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- int result = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = fiji_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(table->GraphicsBootLevel));
-
- result = fiji_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(table->MemoryBootLevel));
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value *
- VOLTAGE_SCALE;
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value *
- VOLTAGE_SCALE;
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value *
- VOLTAGE_SCALE;
-
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci);
- CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd);
-
- return 0;
-}
-
-static int fiji_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint8_t count, level;
-
- count = (uint8_t)(table_info->vdd_dep_on_sclk->count);
- for (level = 0; level < count; level++) {
- if(table_info->vdd_dep_on_sclk->entries[level].clk >=
- data->vbios_boot_state.sclk_bootup_value) {
- data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(table_info->vdd_dep_on_mclk->count);
- for (level = 0; level < count; level++) {
- if(table_info->vdd_dep_on_mclk->entries[level].clk >=
- data->vbios_boot_state.mclk_bootup_value) {
- data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
- volt_with_cks, value;
- uint16_t clock_freq_u16;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
- volt_offset = 0;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
- table_info->vdd_dep_on_sclk;
-
- stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount;
-
-	/* Read the SMU efuse to calculate RO and determine
-	 * whether the part is SS or FF. If RO >= 1660 MHz, the part is FF.
-	 */
- efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (146 * 4));
- efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixSMU_EFUSE_0 + (148 * 4));
- efuse &= 0xFF000000;
- efuse = efuse >> 24;
- efuse2 &= 0xF;
-
- if (efuse2 == 1)
- ro = (2300 - 1350) * efuse / 255 + 1350;
- else
- ro = (2500 - 1000) * efuse / 255 + 1000;
-
- if (ro >= 1660)
- type = 0;
- else
- type = 1;
-
- /* Populate Stretch amount */
- data->smc_state_table.ClockStretcherAmount = stretch_amount;
-
- /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
- for (i = 0; i < sclk_table->count; i++) {
- data->smc_state_table.Sclk_CKS_masterEn0_7 |=
- sclk_table->entries[i].cks_enable << i;
- volt_without_cks = (uint32_t)((14041 *
- (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
- (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
- volt_with_cks = (uint32_t)((13946 *
- (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
- (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
- if (volt_without_cks >= volt_with_cks)
- volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
- sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
- data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
- }
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- STRETCH_ENABLE, 0x0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- staticEnable, 0x1);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
- masterReset, 0x0);
-
- /* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher);
- PP_ASSERT_WITH_CODE(false,
- "Stretch Amount in PPTable not supported\n",
- return -EINVAL);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL);
- value &= 0xFFC2FF87;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][0];
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
- fiji_clock_stretcher_lookup_table[stretch_amount2][1];
- clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
- GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].
- SclkFrequency) / 100);
- if (fiji_clock_stretcher_lookup_table[stretch_amount2][0] <
- clock_freq_u16 &&
- fiji_clock_stretcher_lookup_table[stretch_amount2][1] >
- clock_freq_u16) {
- /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
- /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
- value |= (fiji_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
- /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
- value |= (fiji_clock_stretch_amount_conversion
- [fiji_clock_stretcher_lookup_table[stretch_amount2][3]]
- [stretch_amount]) << 3;
- }
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].minFreq);
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.
- CKS_LOOKUPTableEntry[0].maxFreq);
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
- fiji_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
- data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
- (fiji_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixPWR_CKS_CNTL, value);
-
- /* Populate DDT Lookup Table */
- for (i = 0; i < 4; i++) {
- /* Assign the minimum and maximum VID stored
- * in the last row of Clock Stretcher Voltage Table.
- */
- data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].minVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][2];
- data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].maxVID =
- (uint8_t) fiji_clock_stretcher_ddt_table[type][i][3];
- /* Loop through each SCLK and check the frequency
- * to see if it lies within the frequency for clock stretcher.
- */
- for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
- cks_setting = 0;
- clock_freq = PP_SMC_TO_HOST_UL(
- data->smc_state_table.GraphicsLevel[j].SclkFrequency);
- /* Check the allowed frequency against the sclk level[j].
- * Sclk's endianness has already been converted,
-			 * and it is in 10 kHz units,
-			 * as opposed to the data table, which is in MHz units.
- */
- if (clock_freq >=
- (fiji_clock_stretcher_ddt_table[type][i][0]) * 100) {
- cks_setting |= 0x2;
- if (clock_freq <
- (fiji_clock_stretcher_ddt_table[type][i][1]) * 100)
- cks_setting |= 0x1;
- }
- data->smc_state_table.ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting |= cks_setting << (j * 2);
- }
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.
- ClockStretcherDataTable.
- ClockStretcherDataTableEntry[i].setting);
- }
-
- value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
- value &= 0xFFFFFFFE;
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
-
- return 0;
-}
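As a worked example of the RO derivation near the top of the function above: an efuse reading of 128 with efuse2 == 1 gives ro = (2300 - 1350) * 128 / 255 + 1350 = 1826 (integer arithmetic), which is above the 1660 threshold, so the part is classified as FF (type 0).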
-
-/**
-* Populates the SMC VRConfig field in DPM table.
-*
-* @param hwmgr the address of the hardware manager
-* @param table the SMC DPM table structure to be populated
-* @return always 0
-*/
-static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
- struct SMU73_Discrete_DpmTable *table)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint16_t config;
-
- config = VR_MERGED_WITH_VDDC;
- table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT);
-
- /* Set Vddc Voltage Controller */
-	if (FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- config = VR_SVI2_PLANE_1;
- table->VRConfig |= config;
- } else {
- PP_ASSERT_WITH_CODE(false,
- "VDDC should be on SVI2 control in merged mode!",);
- }
- /* Set Vddci Voltage Controller */
- if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
- config = VR_SVI2_PLANE_2; /* only in merged mode */
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else if (FIJI_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
- config = VR_SMIO_PATTERN_1;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_VDDCI_SHIFT);
- }
- /* Set Mvdd Voltage Controller */
- if(FIJI_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- config = VR_SVI2_PLANE_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else if(FIJI_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- config = VR_SMIO_PATTERN_2;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- } else {
- config = VR_STATIC_VOLTAGE;
- table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
- }
-
- return 0;
-}
-
-/**
-* Initializes the SMC table and uploads it
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, otherwise an error code from the populate helpers
-*/
-static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct SMU73_Discrete_DpmTable *table = &(data->smc_state_table);
- const struct fiji_ulv_parm *ulv = &(data->ulv);
- uint8_t i;
- struct pp_atomctrl_gpio_pin_assignment gpio_pin;
-
- result = fiji_setup_default_dpm_tables(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup default DPM tables!", return result);
-
- if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control)
- fiji_populate_smc_voltage_tables(hwmgr, table);
-
- table->SystemFlags = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc))
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (data->is_memory_gddr5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (ulv->ulv_supported && table_info->us_ulv_voltage_offset) {
- result = fiji_populate_ulv_state(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
- }
-
- result = fiji_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result);
-
- result = fiji_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result);
-
- result = fiji_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result);
-
- result = fiji_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result);
-
- result = fiji_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result);
-
- result = fiji_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result);
-
- result = fiji_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result);
-
- /* Since only the initial state is completely set up at this point
- * (the other states are just copies of the boot state) we only
- * need to populate the ARB settings for the initial state.
- */
- result = fiji_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result);
-
- result = fiji_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result);
-
- result = fiji_populate_smc_boot_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot Level!", return result);
-
-	result = fiji_populate_smc_initial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Boot State!", return result);
-
- result = fiji_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate BAPM Parameters!", return result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
- result = fiji_populate_clock_stretcher_data_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate Clock Stretcher Data Table!",
- return result);
- }
-
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- table_info->cac_dtp_table->usTargetOperatingTemp *
- FIJI_Q88_FORMAT_CONVERSION_UNIT;
- table->TemperatureLimitLow =
- (table_info->cac_dtp_table->usTargetOperatingTemp - 1) *
- FIJI_Q88_FORMAT_CONVERSION_UNIT;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
- table->PCIeGenInterval = 1;
- table->VRConfig = 0;
-
- result = fiji_populate_vr_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate VRConfig setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
- table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = FIJI_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin)) {
- table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = FIJI_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /* Thermal Output GPIO */
- if (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID,
- &gpio_pin)) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
-
- table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift;
-
-		/* For polarity, read GPIOPAD_A at the assigned GPIO pin:
-		 * since the VBIOS will program this register to set the 'inactive state',
-		 * the driver can then determine the 'active state' from it and
-		 * program the SMU with the correct polarity.
-		 */
- table->ThermOutPolarity = (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) &
- (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY;
-
- /* if required, combine VRHot/PCC with thermal out GPIO */
- if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CombinePCCWithThermalSignal))
- table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT;
- } else {
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalOutGPIO);
- table->ThermOutGpio = 17;
- table->ThermOutPolarity = 1;
- table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE;
- }
-
- for (i = 0; i < SMU73_MAX_ENTRIES_SMIO; i++)
- table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]);
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
- /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = fiji_copy_bytes_to_smc(hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU73_Discrete_DpmTable) - 3 * sizeof(SMU73_PIDController),
- data->sram_end);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- return 0;
-}
-
-/**
-* Initialize the ARB DRAM timing table's index field.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return always 0
-*/
-static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
-{
- const struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t tmp;
- int result;
-
- /* This is a read-modify-write on the first byte of the ARB table.
- * The first byte in the SMU73_Discrete_MCArbDramTimingTable structure
- * is the field 'current'.
-	 * This solution is ugly, but we never write the whole table,
-	 * only individual fields in it.
- * In reality this field should not be in that structure
- * but in a soft register.
- */
- result = fiji_read_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, &tmp, data->sram_end);
-
- if (result)
- return result;
-
- tmp &= 0x00FFFFFF;
- tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
-
- return fiji_write_smc_sram_dword(hwmgr->smumgr,
- data->arb_table_start, tmp, data->sram_end);
-}
-
-static int fiji_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
-{
- if(phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot))
- return smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableVRHotGPIOInterrupt);
-
- return 0;
-}
-
-static int fiji_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- SCLK_PWRMGT_OFF, 0);
- return 0;
-}
-
-static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
-
- return 0;
-}
-
-static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_ulv_parm *ulv = &(data->ulv);
-
- if (ulv->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
-
- return 0;
-}
-
-static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to enable Master Deep Sleep switch failed!",
- return -1);
- } else {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -1);
- }
- }
-
- return 0;
-}
-
-static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t val, val0, val2;
- uint32_t i, cpl_cntl, cpl_threshold, mc_threshold;
-
- /* enable SCLK dpm */
- if(!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
- "Failed to enable SCLK DPM during DPM Start Function!",
- return -1);
-
- /* enable MCLK dpm */
- if(0 == data->mclk_dpm_key_disabled) {
- cpl_threshold = 0;
- mc_threshold = 0;
-
- /* Read per MCD tile (0 - 7) */
- for (i = 0; i < 8; i++) {
- PHM_WRITE_FIELD(hwmgr->device, MC_CONFIG_MCD, MC_RD_ENABLE, i);
- val = cgs_read_register(hwmgr->device, mmMC_SEQ_RESERVE_0_S) & 0xf0000000;
- if (0xf0000000 != val) {
-				/* count the number of MCQs that have channel(s) enabled */
- cpl_threshold++;
- /* only harvest 3 or full 4 supported */
- mc_threshold = val ? 3 : 4;
- }
- }
- PP_ASSERT_WITH_CODE(0 != cpl_threshold,
- "Number of MCQ is zero!", return -EINVAL;);
-
- mc_threshold = ((mc_threshold & LCAC_MC0_CNTL__MC0_THRESHOLD_MASK) <<
- LCAC_MC0_CNTL__MC0_THRESHOLD__SHIFT) |
- LCAC_MC0_CNTL__MC0_ENABLE_MASK;
- cpl_cntl = ((cpl_threshold & LCAC_CPL_CNTL__CPL_THRESHOLD_MASK) <<
- LCAC_CPL_CNTL__CPL_THRESHOLD__SHIFT) |
- LCAC_CPL_CNTL__CPL_ENABLE_MASK;
- cpl_cntl = (cpl_cntl | (8 << LCAC_CPL_CNTL__CPL_BLOCK_ID__SHIFT));
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, mc_threshold);
- if (8 == cpl_threshold) {
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC2_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC3_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC4_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC5_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC6_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC7_CNTL, mc_threshold);
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, cpl_cntl);
-
- udelay(5);
-
- mc_threshold = mc_threshold |
- (1 << LCAC_MC0_CNTL__MC0_SIGNAL_ID__SHIFT);
- cpl_cntl = cpl_cntl | (1 << LCAC_CPL_CNTL__CPL_SIGNAL_ID__SHIFT);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, mc_threshold);
- if (8 == cpl_threshold) {
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC2_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC3_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC4_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC5_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC6_CNTL, mc_threshold);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC7_CNTL, mc_threshold);
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, cpl_cntl);
-
- /* Program CAC_EN per MCD (0-7) Tile */
- val0 = val = cgs_read_register(hwmgr->device, mmMC_CONFIG_MCD);
- val &= ~(MC_CONFIG_MCD__MCD0_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD1_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD2_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD3_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD4_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD5_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD6_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MCD7_WR_ENABLE_MASK |
- MC_CONFIG_MCD__MC_RD_ENABLE_MASK);
-
- for (i = 0; i < 8; i++) {
- /* Enable MCD i Tile read & write */
- val2 = (val | (i << MC_CONFIG_MCD__MC_RD_ENABLE__SHIFT) |
- (1 << i));
- cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val2);
-			/* Enable CAC_ON MCD i Tile */
- val2 = cgs_read_register(hwmgr->device, mmMC_SEQ_CNTL);
- val2 |= MC_SEQ_CNTL__CAC_EN_MASK;
- cgs_write_register(hwmgr->device, mmMC_SEQ_CNTL, val2);
- }
- /* Set MC_CONFIG_MCD back to its default setting val0 */
- cgs_write_register(hwmgr->device, mmMC_CONFIG_MCD, val0);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Enable)),
- "Failed to enable MCLK DPM during DPM Start Function!",
- return -1);
- }
- return 0;
-}
-
-static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	/* enable general power management */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 1);
- /* enable sclk deep sleep */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 1);
- /* prepare for PCIE DPM */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + offsetof(SMU73_SoftRegisters,
- VoltageChangeTimeout), 0x1000);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
- SWRST_COMMAND_1, RESETLC, 0x0);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -1);
-
- if (fiji_enable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to enable Sclk DPM and Mclk DPM!");
- return -1;
- }
-
- /* enable PCIE dpm */
- if(!data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Enable)),
- "Failed to enable pcie DPM during DPM Start Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* disable SCLK dpm */
- if (!data->sclk_dpm_key_disabled)
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Disable) == 0),
- "Failed to disable SCLK DPM!",
- return -1);
-
- /* disable MCLK dpm */
- if (!data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
- "Failed to force MCLK DPM0!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Disable) == 0),
- "Failed to disable MCLK DPM!",
- return -1);
- }
-
- return 0;
-}
-
-static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* disable general power management */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- GLOBAL_PWRMGT_EN, 0);
- /* disable sclk deep sleep */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
- DYNAMIC_PM_EN, 0);
-
- /* disable PCIE dpm */
- if (!data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Disable) == 0),
- "Failed to disable pcie DPM during DPM Stop Function!",
- return -1);
- }
-
- if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
- printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
- return -1;
- }
-
- PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Disable) == 0),
- "Failed to disable voltage DPM during DPM Stop Function!",
- return -1);
-
- return 0;
-}
-
-static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
- uint32_t sources)
-{
- bool protection;
- enum DPM_EVENT_SRC src;
-
- switch (sources) {
- default:
- printk(KERN_ERR "Unknown throttling event sources.");
- /* fall through */
- case 0:
- protection = false;
- /* src is unused */
- break;
- case (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << PHM_AutoThrottleSource_External):
- protection = true;
- src = DPM_EVENT_SRC_EXTERNAL;
- break;
- case (1 << PHM_AutoThrottleSource_External) |
- (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
- break;
- }
- /* Order matters - don't enable thermal protection for the wrong source. */
- if (protection) {
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
- DPM_EVENT_SRC, src);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS,
- !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController));
- } else
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS, 1);
-}
-
-static int fiji_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!(data->active_auto_throttle_sources & (1 << source))) {
- data->active_auto_throttle_sources |= 1 << source;
- fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->active_auto_throttle_sources & (1 << source)) {
- data->active_auto_throttle_sources &= ~(1 << source);
- fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
-	tmp_result = (!fiji_is_dpm_running(hwmgr)) ? 0 : -1;
-	PP_ASSERT_WITH_CODE(tmp_result == 0,
-			"DPM is already running right now, no need to enable DPM!",
-			return 0);
-
- if (fiji_voltage_control(hwmgr)) {
- tmp_result = fiji_enable_voltage_control(hwmgr);
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "Failed to enable voltage control!",
- result = tmp_result);
- }
-
- if (fiji_voltage_control(hwmgr)) {
- tmp_result = fiji_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
-				"Failed to construct voltage tables!",
- result = tmp_result);
- }
-
- tmp_result = fiji_initialize_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize MC reg table!", result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
-
- tmp_result = fiji_program_static_screen_threshold_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program static screen threshold parameters!",
- result = tmp_result);
-
- tmp_result = fiji_enable_display_gap(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable display gap!", result = tmp_result);
-
- tmp_result = fiji_program_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program voting clients!", result = tmp_result);
-
- tmp_result = fiji_process_firmware_header(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to process firmware header!", result = tmp_result);
-
- tmp_result = fiji_initial_switch_from_arbf0_to_f1(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize switch from ArbF0 to F1!",
- result = tmp_result);
-
- tmp_result = fiji_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize SMC table!", result = tmp_result);
-
- tmp_result = fiji_init_arb_table_index(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize ARB table index!", result = tmp_result);
-
- tmp_result = fiji_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate PM fuses!", result = tmp_result);
-
- tmp_result = fiji_enable_vrhot_gpio_interrupt(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
-
- tmp_result = tonga_notify_smc_display_change(hwmgr, false);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify no display!", result = tmp_result);
-
- tmp_result = fiji_enable_sclk_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SCLK control!", result = tmp_result);
-
- tmp_result = fiji_enable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ULV!", result = tmp_result);
-
- tmp_result = fiji_enable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable deep sleep master switch!", result = tmp_result);
-
- tmp_result = fiji_start_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to start DPM!", result = tmp_result);
-
- tmp_result = fiji_enable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SMC CAC!", result = tmp_result);
-
- tmp_result = fiji_enable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable power containment!", result = tmp_result);
-
- tmp_result = fiji_power_control_set_level(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to power control set level!", result = tmp_result);
-
- tmp_result = fiji_enable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable thermal auto throttle!", result = tmp_result);
-
- return result;
-}
-
-static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
- PP_ASSERT_WITH_CODE(tmp_result == 0,
- "DPM is not running right now, no need to disable DPM!",
- return 0);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
-
- tmp_result = fiji_disable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable power containment!", result = tmp_result);
-
- tmp_result = fiji_disable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable SMC CAC!", result = tmp_result);
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
-
- tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable thermal auto throttle!", result = tmp_result);
-
- tmp_result = fiji_stop_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to stop DPM!", result = tmp_result);
-
- tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable deep sleep master switch!", result = tmp_result);
-
- tmp_result = fiji_disable_ulv(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to disable ULV!", result = tmp_result);
-
- tmp_result = fiji_clear_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to clear voting clients!", result = tmp_result);
-
- tmp_result = fiji_reset_to_default(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to reset to default!", result = tmp_result);
-
- tmp_result = fiji_force_switch_to_arbf0(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to force to switch arbf0!", result = tmp_result);
-
- return result;
-}
-
-static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t level, tmp;
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
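-	/* Example: with sclk_dpm_enable_mask = 0x2F (levels 0-3 and 5 enabled)
-	 * the shift loop above ends with level = 5, so the SMC enabled-mask is
-	 * reduced to just the highest enabled level, 1 << 5 = 0x20. */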
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- level = 0;
- tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- level++;
- if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- (1 << level));
- }
- }
- return 0;
-}
-
-static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- phm_apply_dal_min_voltage_request(hwmgr);
-
- if (!data->sclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- }
- return 0;
-}
-
-static int fiji_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (!fiji_is_dpm_running(hwmgr))
- return -EINVAL;
-
- if (!data->pcie_dpm_key_disabled) {
- smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
- }
-
- return fiji_upload_dpmlevel_enable_mask(hwmgr);
-}
-
-static uint32_t fiji_get_lowest_enabled_level(
- struct pp_hwmgr *hwmgr, uint32_t mask)
-{
- uint32_t level = 0;
-
- while(0 == (mask & (1 << level)))
- level++;
-
- return level;
-}
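-/* Example: for mask = 0x2C (levels 2, 3 and 5 enabled) the loop above returns 2,
- * the index of the lowest set bit.  Callers must pass a non-zero mask; a zero
- * mask would make the loop run past bit 31. */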
-
-static int fiji_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data =
- (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t level;
-
- if (!data->sclk_dpm_key_disabled)
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = fiji_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
-
- }
-
- if (!data->mclk_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- level = fiji_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
- }
- }
-
- if (!data->pcie_dpm_key_disabled) {
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- level = fiji_get_lowest_enabled_level(hwmgr,
- data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- (1 << level));
- }
- }
-
- return 0;
-
-}
-static int fiji_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
-{
- int ret = 0;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = fiji_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = fiji_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = fiji_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
- hwmgr->dpm_level = level;
-
- return ret;
-}
-
-static int fiji_get_power_state_size(struct pp_hwmgr *hwmgr)
-{
- return sizeof(struct fiji_power_state);
-}
-
-static int fiji_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
- void *state, struct pp_power_state *power_state,
- void *pp_table, uint32_t classification_flag)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_power_state *fiji_power_state =
- (struct fiji_power_state *)(&(power_state->hardware));
- struct fiji_performance_level *performance_level;
- ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
- ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
- (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
- ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
- (ATOM_Tonga_SCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
- ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
- (ATOM_Tonga_MCLK_Dependency_Table *)
- (((unsigned long)powerplay_table) +
- le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-
-	/* The following fields are not initialized here: id, orderedList, allStatesList */
- power_state->classification.ui_label =
- (le16_to_cpu(state_entry->usClassification) &
- ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
- ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
- power_state->classification.flags = classification_flag;
- /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
-
- power_state->classification.temporary_state = false;
- power_state->classification.to_be_deleted = false;
-
- power_state->validation.disallowOnDC =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_DISALLOW_ON_DC));
-
- power_state->pcie.lanes = 0;
-
- power_state->display.disableFrameModulation = false;
- power_state->display.limitRefreshrate = false;
- power_state->display.enableVariBright =
- (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
- ATOM_Tonga_ENABLE_VARIBRIGHT));
-
- power_state->validation.supportedPowerLevels = 0;
- power_state->uvd_clocks.VCLK = 0;
- power_state->uvd_clocks.DCLK = 0;
- power_state->temperatures.min = 0;
- power_state->temperatures.max = 0;
-
- performance_level = &(fiji_power_state->performance_levels
- [fiji_power_state->performance_level_count++]);
-
- PP_ASSERT_WITH_CODE(
- (fiji_power_state->performance_level_count < SMU73_MAX_LEVELS_GRAPHICS),
-		"Performance levels exceed SMC limit!",
- return -1);
-
- PP_ASSERT_WITH_CODE(
- (fiji_power_state->performance_level_count <=
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
-		"Performance levels exceed Driver limit!",
- return -1);
-
- /* Performance levels are arranged from low to high. */
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexLow].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
- [state_entry->ucEngineClockIndexLow].ulSclk;
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenLow);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
-			state_entry->ucPCIELaneLow);
-
- performance_level = &(fiji_power_state->performance_levels
- [fiji_power_state->performance_level_count++]);
- performance_level->memory_clock = mclk_dep_table->entries
- [state_entry->ucMemoryClockIndexHigh].ulMclk;
- performance_level->engine_clock = sclk_dep_table->entries
- [state_entry->ucEngineClockIndexHigh].ulSclk;
- performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
- state_entry->ucPCIEGenHigh);
- performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
-
- return 0;
-}
-
-static int fiji_get_pp_table_entry(struct pp_hwmgr *hwmgr,
- unsigned long entry_index, struct pp_power_state *state)
-{
- int result;
- struct fiji_power_state *ps;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
- table_info->vdd_dep_on_mclk;
-
- state->hardware.magic = PHM_VIslands_Magic;
-
- ps = (struct fiji_power_state *)(&state->hardware);
-
- result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
- fiji_get_pp_table_entry_callback_func);
-
-	/* This is the earliest time we have all the dependency tables and the VBIOS
-	 * boot state, since get_powerplay_table_entry_v1_0 retrieves the VBIOS boot
-	 * state. If there is only one VDDCI/MCLK level, check whether it matches the
-	 * VBIOS boot state.
-	 */
- if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
- if (dep_mclk_table->entries[0].clk !=
- data->vbios_boot_state.mclk_bootup_value)
- printk(KERN_ERR "Single MCLK entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot MCLK level");
- if (dep_mclk_table->entries[0].vddci !=
- data->vbios_boot_state.vddci_bootup_value)
- printk(KERN_ERR "Single VDDCI entry VDDCI/MCLK dependency table "
- "does not match VBIOS boot VDDCI level");
- }
-
- /* set DC compatible flag if this state supports DC */
- if (!state->validation.disallowOnDC)
- ps->dc_compatible = true;
-
- if (state->classification.flags & PP_StateClassificationFlag_ACPI)
- data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
-
- ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
- ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
-
- if (!result) {
- uint32_t i;
-
- switch (state->classification.ui_label) {
- case PP_StateUILabel_Performance:
- data->use_pcie_performance_levels = true;
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_performance.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_performance.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_performance.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_performance.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.max =
- ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_performance.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_performance.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- case PP_StateUILabel_Battery:
- data->use_pcie_power_saving_levels = true;
-
- for (i = 0; i < ps->performance_level_count; i++) {
- if (data->pcie_gen_power_saving.max <
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.max =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_gen_power_saving.min >
- ps->performance_levels[i].pcie_gen)
- data->pcie_gen_power_saving.min =
- ps->performance_levels[i].pcie_gen;
-
- if (data->pcie_lane_power_saving.max <
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.max =
- ps->performance_levels[i].pcie_lane;
-
- if (data->pcie_lane_power_saving.min >
- ps->performance_levels[i].pcie_lane)
- data->pcie_lane_power_saving.min =
- ps->performance_levels[i].pcie_lane;
- }
- break;
- default:
- break;
- }
- }
- return 0;
-}
-
-static int fiji_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
- struct pp_power_state *request_ps,
- const struct pp_power_state *current_ps)
-{
- struct fiji_power_state *fiji_ps =
- cast_phw_fiji_power_state(&request_ps->hardware);
- uint32_t sclk;
- uint32_t mclk;
- struct PP_Clocks minimum_clocks = {0};
- bool disable_mclk_switching;
- bool disable_mclk_switching_for_frame_lock;
- struct cgs_display_info info = {0};
- const struct phm_clock_and_voltage_limits *max_limits;
- uint32_t i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int32_t count;
- int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
- data->battery_state = (PP_StateUILabel_Battery ==
- request_ps->classification.ui_label);
-
- PP_ASSERT_WITH_CODE(fiji_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",);
-
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
- &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
- &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
- /* Cap clock DPM tables at DC MAX if it is in DC. */
- if (PP_PowerSource_DC == hwmgr->power_source) {
- for (i = 0; i < fiji_ps->performance_level_count; i++) {
- if (fiji_ps->performance_levels[i].memory_clock > max_limits->mclk)
- fiji_ps->performance_levels[i].memory_clock = max_limits->mclk;
- if (fiji_ps->performance_levels[i].engine_clock > max_limits->sclk)
- fiji_ps->performance_levels[i].engine_clock = max_limits->sclk;
- }
- }
-
- fiji_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
- fiji_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
-
- fiji_ps->acp_clk = hwmgr->acp_arbiter.acpclk;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- /*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
- /* TO DO GetMinClockSettings(hwmgr->pPECI, &minimum_clocks); */
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
- stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
- for (count = table_info->vdd_dep_on_sclk->count - 1;
- count >= 0; count--) {
- if (stable_pstate_sclk >=
- table_info->vdd_dep_on_sclk->entries[count].clk) {
- stable_pstate_sclk =
- table_info->vdd_dep_on_sclk->entries[count].clk;
- break;
- }
- }
-
- if (count < 0)
- stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
-
- stable_pstate_mclk = max_limits->mclk;
-
- minimum_clocks.engineClock = stable_pstate_sclk;
- minimum_clocks.memoryClock = stable_pstate_mclk;
- }
-
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- fiji_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- fiji_ps->performance_levels[1].engine_clock =
- hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
- hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive =
- hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- fiji_ps->performance_levels[1].memory_clock =
- hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
- disable_mclk_switching_for_frame_lock = phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
- disable_mclk_switching = (1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock;
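-	/* With more than one active display, or when frame lock requires it,
-	 * MCLK switching is disabled and both performance levels are forced to
-	 * the same (higher) memory clock below, so no MCLK switch is needed at
-	 * runtime. */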
-
- sclk = fiji_ps->performance_levels[0].engine_clock;
- mclk = fiji_ps->performance_levels[0].memory_clock;
-
- if (disable_mclk_switching)
- mclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].memory_clock;
-
- if (sclk < minimum_clocks.engineClock)
- sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
- max_limits->sclk : minimum_clocks.engineClock;
-
- if (mclk < minimum_clocks.memoryClock)
- mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
- max_limits->mclk : minimum_clocks.memoryClock;
-
- fiji_ps->performance_levels[0].engine_clock = sclk;
- fiji_ps->performance_levels[0].memory_clock = mclk;
-
- fiji_ps->performance_levels[1].engine_clock =
- (fiji_ps->performance_levels[1].engine_clock >=
- fiji_ps->performance_levels[0].engine_clock) ?
- fiji_ps->performance_levels[1].engine_clock :
- fiji_ps->performance_levels[0].engine_clock;
-
- if (disable_mclk_switching) {
- if (mclk < fiji_ps->performance_levels[1].memory_clock)
- mclk = fiji_ps->performance_levels[1].memory_clock;
-
- fiji_ps->performance_levels[0].memory_clock = mclk;
- fiji_ps->performance_levels[1].memory_clock = mclk;
- } else {
- if (fiji_ps->performance_levels[1].memory_clock <
- fiji_ps->performance_levels[0].memory_clock)
- fiji_ps->performance_levels[1].memory_clock =
- fiji_ps->performance_levels[0].memory_clock;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- for (i = 0; i < fiji_ps->performance_level_count; i++) {
- fiji_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
- fiji_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
- fiji_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
-			fiji_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
- }
- }
-
- return 0;
-}
-
-static int fiji_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- uint32_t sclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].engine_clock;
- struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- uint32_t mclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].memory_clock;
- uint32_t i;
- struct cgs_display_info info = {0};
-
- data->need_update_smu7_dpm_table = 0;
-
- for (i = 0; i < sclk_table->count; i++) {
- if (sclk == sclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= sclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- else {
- if(data->display_timing.min_clock_in_sr !=
- hwmgr->display_config.min_core_set_clock_in_sr)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- for (i = 0; i < mclk_table->count; i++) {
- if (mclk == mclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= mclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
- return 0;
-}
-
-static uint16_t fiji_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
- const struct fiji_power_state *fiji_ps)
-{
- uint32_t i;
- uint32_t sclk, max_sclk = 0;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
- for (i = 0; i < fiji_ps->performance_level_count; i++) {
- sclk = fiji_ps->performance_levels[i].engine_clock;
- if (max_sclk < sclk)
- max_sclk = sclk;
- }
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
- return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
- dpm_table->pcie_speed_table.dpm_levels
- [dpm_table->pcie_speed_table.count - 1].value :
- dpm_table->pcie_speed_table.dpm_levels[i].value);
- }
-
- return 0;
-}
-
-static int fiji_request_link_speed_change_before_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_nps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- const struct fiji_power_state *fiji_cps =
- cast_const_phw_fiji_power_state(states->pcurrent_state);
-
- uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_nps);
- uint16_t current_link_speed;
-
- if (data->force_pcie_gen == PP_PCIEGenInvalid)
- current_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_cps);
- else
- current_link_speed = data->force_pcie_gen;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->pspp_notify_required = false;
- if (target_link_speed > current_link_speed) {
- switch(target_link_speed) {
- case PP_PCIEGen3:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
- break;
- data->force_pcie_gen = PP_PCIEGen2;
- if (current_link_speed == PP_PCIEGen2)
- break;
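-			/* fall through and try Gen2 if the Gen3 request failed */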
- case PP_PCIEGen2:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
- break;
- default:
- data->force_pcie_gen = fiji_get_current_pcie_speed(hwmgr);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- data->pspp_notify_required = true;
- }
-
- return 0;
-}
-
-static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
- "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
- "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result = 0;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t sclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].engine_clock;
- uint32_t mclk = fiji_ps->performance_levels
- [fiji_ps->performance_level_count - 1].memory_clock;
- struct fiji_dpm_table *dpm_table = &data->dpm_table;
-
- struct fiji_dpm_table *golden_dpm_table = &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
- dpm_table->sclk_table.dpm_levels
- [dpm_table->sclk_table.count - 1].value = sclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
- /* Need to do calculation based on the golden DPM table
- * as the Heatmap GPU Clock axis is also based on the default values
- */
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count - 1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->sclk_table.count < 2 ?
- 0 : dpm_table->sclk_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (sclk > golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value) {
- clock_percent =
- ((sclk - golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value) * 100) /
- golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value +
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent)/100;
-
- } else if (golden_dpm_table->sclk_table.dpm_levels
- [dpm_table->sclk_table.count-1].value > sclk) {
- clock_percent =
- ((golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count - 1].value - sclk) *
- 100) /
- golden_dpm_table->sclk_table.dpm_levels
- [golden_dpm_table->sclk_table.count-1].value;
-
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value -
- (golden_dpm_table->sclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->sclk_table.dpm_levels[i].value =
- golden_dpm_table->sclk_table.dpm_levels[i].value;
- }
- }
- }
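-	/* Example: if the requested overdrive sclk is 10% above the top entry of
-	 * the golden (default) table, the top level is set to the new clock and
-	 * every intermediate level above level 1 is raised by the same 10%, so
-	 * the shape of the default clock curve is preserved. */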
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
- dpm_table->mclk_table.dpm_levels
- [dpm_table->mclk_table.count - 1].value = mclk;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
-
- PP_ASSERT_WITH_CODE(
- (golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value != 0),
- "Divide by 0!",
- return -1);
- dpm_count = dpm_table->mclk_table.count < 2 ?
- 0 : dpm_table->mclk_table.count - 2;
- for (i = dpm_count; i > 1; i--) {
- if (mclk > golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value) {
- clock_percent = ((mclk -
- golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value) * 100) /
- golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value +
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
-
- } else if (golden_dpm_table->mclk_table.dpm_levels
- [dpm_table->mclk_table.count-1].value > mclk) {
- clock_percent = ((golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value - mclk) * 100) /
- golden_dpm_table->mclk_table.dpm_levels
- [golden_dpm_table->mclk_table.count-1].value;
-
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value -
- (golden_dpm_table->mclk_table.dpm_levels[i].value *
- clock_percent) / 100;
- } else
- dpm_table->mclk_table.dpm_levels[i].value =
- golden_dpm_table->mclk_table.dpm_levels[i].value;
- }
- }
- }
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = fiji_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
- /*populate MCLK dpm table to SMU7 */
- result = fiji_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
- return result);
- }
-
- return result;
-}
-
-static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
- struct fiji_single_dpm_table * dpm_table,
- uint32_t low_limit, uint32_t high_limit)
-{
- uint32_t i;
-
- for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit) ||
- (dpm_table->dpm_levels[i].value > high_limit))
- dpm_table->dpm_levels[i].enabled = false;
- else
- dpm_table->dpm_levels[i].enabled = true;
- }
- return 0;
-}
-
-static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
- const struct fiji_power_state *fiji_ps)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t high_limit_count;
-
- PP_ASSERT_WITH_CODE((fiji_ps->performance_level_count >= 1),
- "power state did not have any performance level",
- return -1);
-
- high_limit_count = (1 == fiji_ps->performance_level_count) ? 0 : 1;
-
- fiji_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.sclk_table),
- fiji_ps->performance_levels[0].engine_clock,
- fiji_ps->performance_levels[high_limit_count].engine_clock);
-
- fiji_trim_single_dpm_states(hwmgr,
- &(data->dpm_table.mclk_table),
- fiji_ps->performance_levels[0].memory_clock,
- fiji_ps->performance_levels[high_limit_count].memory_clock);
-
- return 0;
-}
-
-static int fiji_generate_dpm_level_enable_mask(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- int result;
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
-
- result = fiji_trim_dpm_states(hwmgr, fiji_ps);
- if (result)
- return result;
-
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
- data->dpm_level_enable_mask.mclk_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
- data->last_mclk_dpm_enable_mask =
- data->dpm_level_enable_mask.mclk_dpm_enable_mask;
-
- if (data->uvd_enabled) {
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
- data->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
- }
-
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- fiji_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-static int fiji_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Enable :
- (PPSMC_Msg)PPSMC_MSG_UVDDPM_Disable);
-}
-
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable?
- PPSMC_MSG_VCEDPM_Enable :
- PPSMC_MSG_VCEDPM_Disable);
-}
-
-static int fiji_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable?
- PPSMC_MSG_SAMUDPM_Enable :
- PPSMC_MSG_SAMUDPM_Disable);
-}
-
-static int fiji_enable_disable_acp_dpm(struct pp_hwmgr *hwmgr, bool enable)
-{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable?
- PPSMC_MSG_ACPDPM_Enable :
- PPSMC_MSG_ACPDPM_Disable);
-}
-
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.UvdBootLevel = 0;
- if (table_info->mm_dep_table->count > 0)
- data->smc_state_table.UvdBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, UvdBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0x00FFFFFF;
- mm_boot_level_value |= data->smc_state_table.UvdBootLevel << 24;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
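-		/* The boot level is patched into SMC RAM with a read-modify-write:
-		 * the offset is rounded down to a dword boundary and only bits
-		 * 31:24 of that dword are replaced with UvdBootLevel. */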
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.UvdBootLevel));
- }
-
- return fiji_enable_disable_uvd_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_nps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- const struct fiji_power_state *fiji_cps =
- cast_const_phw_fiji_power_state(states->pcurrent_state);
-
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (fiji_nps->vce_clks.evclk >0 &&
- (fiji_cps == NULL || fiji_cps->vce_clks.evclk == 0)) {
- data->smc_state_table.VceBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
-
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, VceBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFF00FFFF;
- mm_boot_level_value |= data->smc_state_table.VceBootLevel << 16;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << data->smc_state_table.VceBootLevel);
-
- fiji_enable_disable_vce_dpm(hwmgr, true);
- } else if (fiji_nps->vce_clks.evclk == 0 &&
- fiji_cps != NULL &&
- fiji_cps->vce_clks.evclk > 0)
- fiji_enable_disable_vce_dpm(hwmgr, false);
- }
-
- return 0;
-}
-
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.SamuBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, SamuBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFFFF00;
- mm_boot_level_value |= data->smc_state_table.SamuBootLevel << 0;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.SamuBootLevel));
- }
-
- return fiji_enable_disable_samu_dpm(hwmgr, !bgate);
-}
-
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t mm_boot_level_offset, mm_boot_level_value;
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
-
- if (!bgate) {
- data->smc_state_table.AcpBootLevel =
- (uint8_t) (table_info->mm_dep_table->count - 1);
- mm_boot_level_offset = data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable, AcpBootLevel);
- mm_boot_level_offset /= 4;
- mm_boot_level_offset *= 4;
- mm_boot_level_value = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset);
- mm_boot_level_value &= 0xFFFF00FF;
- mm_boot_level_value |= data->smc_state_table.AcpBootLevel << 8;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_ACPDPM_SetEnabledMask,
- (uint32_t)(1 << data->smc_state_table.AcpBootLevel));
- }
-
- return fiji_enable_disable_acp_dpm(hwmgr, !bgate);
-}
-
-static int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- int result = 0;
- uint32_t low_sclk_interrupt_threshold = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
- data->low_sclk_interrupt_threshold)) {
- data->low_sclk_interrupt_threshold =
- hwmgr->gfx_arbiter.sclk_threshold;
- low_sclk_interrupt_threshold =
- data->low_sclk_interrupt_threshold;
-
- CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
-
- result = fiji_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->dpm_table_start +
- offsetof(SMU73_Discrete_DpmTable,
- LowSclkInterruptThreshold),
- (uint8_t *)&low_sclk_interrupt_threshold,
- sizeof(uint32_t),
- data->sram_end);
- }
-
- return result;
-}
-
-static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
- return fiji_program_memory_timing_parameters(hwmgr);
-
- return 0;
-}
-
-static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
- "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
-
- PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
- "Trying to Unfreeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
-				PPSMC_MSG_MCLKDPM_UnfreezeLevel),
- "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
- return -1);
- }
-
- data->need_update_smu7_dpm_table = 0;
-
- return 0;
-}
-
-/* Look up the voltage based on DAL's requested level
- * and then send the requested VDDC voltage to the SMC.
- */
-static void fiji_apply_dal_minimum_voltage_request(struct pp_hwmgr *hwmgr)
-{
- return;
-}
-
-static int fiji_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Apply minimum voltage based on DAL's request level */
- fiji_apply_dal_minimum_voltage_request(hwmgr);
-
- if (0 == data->sclk_dpm_key_disabled) {
-		/* Check whether DPM is running. If we discover a hang because of this,
-		 * we should skip this message.
- */
- if (!fiji_is_dpm_running(hwmgr))
- printk(KERN_ERR "[ powerplay ] "
- "Trying to set Enable Mask when DPM is disabled \n");
-
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Sclk Dpm enable Mask failed", return -1);
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
-		/* Check whether DPM is running. If we discover a hang because of this,
-		 * we should skip this message.
- */
- if (!fiji_is_dpm_running(hwmgr))
- printk(KERN_ERR "[ powerplay ]"
- " Trying to set Enable Mask when DPM is disabled \n");
-
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- PP_ASSERT_WITH_CODE((0 == result),
- "Set Mclk Dpm enable Mask failed", return -1);
- }
- }
-
- return 0;
-}
-
-static int fiji_notify_link_speed_change_after_state_change(
- struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states =
- (const struct phm_set_power_state_input *)input;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_power_state *fiji_ps =
- cast_const_phw_fiji_power_state(states->pnew_state);
- uint16_t target_link_speed = fiji_get_maximum_link_speed(hwmgr, fiji_ps);
- uint8_t request;
-
- if (data->pspp_notify_required) {
- if (target_link_speed == PP_PCIEGen3)
- request = PCIE_PERF_REQ_GEN3;
- else if (target_link_speed == PP_PCIEGen2)
- request = PCIE_PERF_REQ_GEN2;
- else
- request = PCIE_PERF_REQ_GEN1;
-
- if(request == PCIE_PERF_REQ_GEN1 &&
- fiji_get_current_pcie_speed(hwmgr) > 0)
- return 0;
-
- if (acpi_pcie_perf_request(hwmgr->device, request, false)) {
- if (PP_PCIEGen2 == target_link_speed)
- printk("PSPP request to switch to Gen2 from Gen3 Failed!");
- else
- printk("PSPP request to switch to Gen1 from Gen2 Failed!");
- }
- }
-
- return 0;
-}
-
-static int fiji_set_power_state_tasks(struct pp_hwmgr *hwmgr,
- const void *input)
-{
- int tmp_result, result = 0;
-
- tmp_result = fiji_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to find DPM states clocks in DPM table!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- fiji_request_link_speed_change_before_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to request link speed change before state change!",
- result = tmp_result);
- }
-
- tmp_result = fiji_freeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
-
- tmp_result = fiji_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate and upload SCLK MCLK DPM levels!",
- result = tmp_result);
-
- tmp_result = fiji_generate_dpm_level_enable_mask(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to generate DPM level enabled mask!",
- result = tmp_result);
-
- tmp_result = fiji_update_vce_dpm(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update VCE DPM!",
- result = tmp_result);
-
- tmp_result = fiji_update_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to update SCLK threshold!",
- result = tmp_result);
-
- tmp_result = fiji_program_mem_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program memory timing parameters!",
- result = tmp_result);
-
- tmp_result = fiji_unfreeze_sclk_mclk_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to unfreeze SCLK MCLK DPM!",
- result = tmp_result);
-
- tmp_result = fiji_upload_dpm_level_enable_mask(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to upload DPM level enabled mask!",
- result = tmp_result);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PCIEPerformanceRequest)) {
- tmp_result =
- fiji_notify_link_speed_change_after_state_change(hwmgr, input);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to notify link speed change after state change!",
- result = tmp_result);
- }
-
- return result;
-}
-
-static int fiji_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- if (low)
- return fiji_ps->performance_levels[0].engine_clock;
- else
- return fiji_ps->performance_levels
- [fiji_ps->performance_level_count-1].engine_clock;
-}
-
-static int fiji_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
-{
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- if (low)
- return fiji_ps->performance_levels[0].memory_clock;
- else
- return fiji_ps->performance_levels
- [fiji_ps->performance_level_count-1].memory_clock;
-}
-
-static void fiji_print_current_perforce_level(
- struct pp_hwmgr *hwmgr, struct seq_file *m)
-{
- uint32_t sclk, mclk, activity_percent = 0;
- uint32_t offset;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
-
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
-
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- seq_printf(m, "\n [ mclk ]: %u MHz\n\n [ sclk ]: %u MHz\n",
- mclk / 100, sclk / 100);
-
- offset = data->soft_regs_start + offsetof(SMU73_SoftRegisters, AverageGraphicsActivity);
- activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
- activity_percent += 0x80;
- activity_percent >>= 8;
-
- seq_printf(m, "\n [GPU load]: %u%%\n\n", activity_percent > 100 ? 100 : activity_percent);
-
- seq_printf(m, "uvd %sabled\n", data->uvd_power_gated ? "dis" : "en");
-
- seq_printf(m, "vce %sabled\n", data->vce_power_gated ? "dis" : "en");
-}
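The debugfs readout above divides the SMC-reported clocks by 100 (consistent with the SMC returning them in 10 kHz units) and rounds the activity counter, which carries 8 fractional bits, to the nearest whole percent. A minimal sketch of that rounding, with an invented helper name and invented register values:

	#include <stdint.h>

	/* Round a value with 8 fractional bits to the nearest integer percent,
	 * then clamp to 100 -- the same +0x80 / >>8 / clamp sequence as above. */
	static uint32_t activity_to_percent(uint32_t raw)
	{
		uint32_t percent = (raw + 0x80) >> 8;	/* add 0.5, drop the fraction */

		return percent > 100 ? 100 : percent;
	}

	/* e.g. 0x3280 (50.5) rounds to 51; 0x6700 (103.0) clamps to 100 */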
-
-static int fiji_program_display_gap(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t num_active_displays = 0;
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
- uint32_t display_gap2;
- uint32_t pre_vbi_time_in_us;
- uint32_t frame_time_in_us;
- uint32_t ref_clock;
- uint32_t refresh_rate = 0;
- struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
-
- info.mode_info = &mode_info;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_active_displays = info.display_count;
-
- display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
- DISP_GAP, (num_active_displays > 0)?
- DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- ref_clock = mode_info.ref_clock;
- refresh_rate = mode_info.refresh_rate;
-
- if (refresh_rate == 0)
- refresh_rate = 60;
-
- frame_time_in_us = 1000000 / refresh_rate;
-
- pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
- display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL2, display_gap2);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- offsetof(SMU73_SoftRegisters, PreVBlankGap), 0x64);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start +
- offsetof(SMU73_SoftRegisters, VBlankTimeout),
- (frame_time_in_us - pre_vbi_time_in_us));
-
- if (num_active_displays == 1)
- tonga_notify_smc_display_change(hwmgr, true);
-
- return 0;
-}
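The timing arithmetic above works in microseconds: one frame at the reported refresh rate, minus a fixed 200 us margin and the vblank time, gives the pre-VBI window, which is then converted to reference-clock ticks. A worked example with invented numbers, assuming ref_clock is reported in 10 kHz units (so ref_clock / 100 is ticks per microsecond):

	uint32_t refresh_rate       = 60;                                       /* Hz */
	uint32_t vblank_time_us     = 500;                                      /* from mode_info */
	uint32_t ref_clock          = 10000;                                    /* 100 MHz */
	uint32_t frame_time_in_us   = 1000000 / refresh_rate;                   /* 16666 us */
	uint32_t pre_vbi_time_in_us = frame_time_in_us - 200 - vblank_time_us;  /* 15966 us */
	uint32_t display_gap2       = pre_vbi_time_in_us * (ref_clock / 100);   /* 1596600 ticks */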
-
-static int fiji_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
-{
- return fiji_program_display_gap(hwmgr);
-}
-
-static int fiji_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr,
- uint16_t us_max_fan_pwm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
-}
-
-static int fiji_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr,
- uint16_t us_max_fan_rpm)
-{
- hwmgr->thermal_controller.
- advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
-
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
-}
-
-static int fiji_dpm_set_interrupt_state(void *private_data,
- unsigned src_id, unsigned type,
- int enabled)
-{
- uint32_t cg_thermal_int;
- struct pp_hwmgr *hwmgr = ((struct pp_eventmgr *)private_data)->hwmgr;
-
- if (hwmgr == NULL)
- return -EINVAL;
-
- switch (type) {
- case AMD_THERMAL_IRQ_LOW_TO_HIGH:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
-
- case AMD_THERMAL_IRQ_HIGH_TO_LOW:
- if (enabled) {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- } else {
- cg_thermal_int = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- cgs_write_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_THERMAL_INT, cg_thermal_int);
- }
- break;
- default:
- break;
- }
- return 0;
-}
-
-static int fiji_register_internal_thermal_interrupt(struct pp_hwmgr *hwmgr,
- const void *thermal_interrupt_info)
-{
- int result;
- const struct pp_interrupt_registration_info *info =
- (const struct pp_interrupt_registration_info *)
- thermal_interrupt_info;
-
- if (info == NULL)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 230, AMD_THERMAL_IRQ_LAST,
- fiji_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- result = cgs_add_irq_source(hwmgr->device, 231, AMD_THERMAL_IRQ_LAST,
- fiji_dpm_set_interrupt_state,
- info->call_back, info->context);
-
- if (result)
- return -EINVAL;
-
- return 0;
-}
-
-static int fiji_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
-{
- if (mode) {
- /* stop auto-manage */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- fiji_fan_ctrl_stop_smc_fan_control(hwmgr);
- fiji_fan_ctrl_set_static_mode(hwmgr, mode);
- } else
- /* restart auto-manage */
- fiji_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-
- return 0;
-}
-
-static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr->fan_ctrl_is_in_default_mode)
- return hwmgr->fan_ctrl_default_mode;
- else
- return PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- CG_FDO_CTRL2, FDO_PWM_MODE);
-}
-
-static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, uint32_t mask)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- return -EINVAL;
-
- switch (type) {
- case PP_SCLK:
- if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
- break;
-
- case PP_MCLK:
- if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
- break;
-
- case PP_PCIE:
- {
- uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
-
- while (tmp >>= 1)
- level++;
-
- if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
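For PP_PCIE the function cannot forward a mask directly; it reduces the requested mask to the index of its highest set bit and forces that single level. A small sketch of the bit walk, using an invented mask:

	uint32_t tmp   = 0x6;	/* user requested PCIe levels 1 and 2 */
	uint32_t level = 0;

	while (tmp >>= 1)	/* 0x6 -> 0x3 -> 0x1 -> 0x0 */
		level++;	/* ends at 2, the highest requested level */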
-
-static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr,
- enum pp_clock_type type, char *buf)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct fiji_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
-
- switch (type) {
- case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
-
- for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_PCIE:
- pcie_speed = fiji_get_current_pcie_speed(hwmgr);
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_speed != pcie_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
- (i == now) ? "*" : "");
- break;
- default:
- break;
- }
- return size;
-}
-
-static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1,
- const struct fiji_performance_level *pl2)
-{
- return ((pl1->memory_clock == pl2->memory_clock) &&
- (pl1->engine_clock == pl2->engine_clock) &&
- (pl1->pcie_gen == pl2->pcie_gen) &&
- (pl1->pcie_lane == pl2->pcie_lane));
-}
-
-static int
-fiji_check_states_equal(struct pp_hwmgr *hwmgr,
- const struct pp_hw_power_state *pstate1,
- const struct pp_hw_power_state *pstate2, bool *equal)
-{
- const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1);
- const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2);
- int i;
-
- if (equal == NULL || psa == NULL || psb == NULL)
- return -EINVAL;
-
- /* If the two states don't even have the same number of performance levels they cannot be the same state. */
- if (psa->performance_level_count != psb->performance_level_count) {
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < psa->performance_level_count; i++) {
- if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
- /* If we have found even one performance level pair that is different the states are different. */
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
- *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
- *equal &= (psa->sclk_threshold == psb->sclk_threshold);
- *equal &= (psa->acp_clk == psb->acp_clk);
-
- return 0;
-}
-
-static bool
-fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- bool is_update_required = false;
- struct cgs_display_info info = {0,0,NULL};
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- is_update_required = true;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
- if(hwmgr->display_config.min_core_set_clock_in_sr != data->display_timing.min_clock_in_sr)
- is_update_required = true;
- }
-
- return is_update_required;
-}
-
-static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
- struct fiji_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- int value;
-
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return value;
-}
-
-static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *golden_sclk_table =
- &(data->golden_dpm_table.sclk_table);
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
- value / 100 +
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return 0;
-}
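The overdrive helpers express the top SCLK level as a percentage above the golden (stock) table, capped at 20%. A worked example with invented clock values in the same units as the DPM table entries; note that the integer division in the getter truncates:

	uint32_t golden_max = 105000;	/* stock top SCLK level */
	uint32_t od_percent = 10;	/* requested +10% overdrive */
	uint32_t new_max    = golden_max * od_percent / 100 + golden_max;	/* 115500 */
	uint32_t readback   = (new_max - golden_max) * 100 / golden_max;	/* 10 */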
-
-static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
- struct fiji_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- int value;
-
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return value;
-}
-
-static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct fiji_single_dpm_table *golden_mclk_table =
- &(data->golden_dpm_table.mclk_table);
- struct pp_power_state *ps;
- struct fiji_power_state *fiji_ps;
-
- if (value > 20)
- value = 20;
-
- ps = hwmgr->request_ps;
-
- if (ps == NULL)
- return -EINVAL;
-
- fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
-
- fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
- value / 100 +
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return 0;
-}
-
-static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
- .backend_init = &fiji_hwmgr_backend_init,
- .backend_fini = &fiji_hwmgr_backend_fini,
- .asic_setup = &fiji_setup_asic_task,
- .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
- .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
- .force_dpm_level = &fiji_dpm_force_dpm_level,
- .get_num_of_pp_table_entries = &get_number_of_powerplay_table_entries_v1_0,
- .get_power_state_size = &fiji_get_power_state_size,
- .get_pp_table_entry = &fiji_get_pp_table_entry,
- .patch_boot_state = &fiji_patch_boot_state,
- .apply_state_adjust_rules = &fiji_apply_state_adjust_rules,
- .power_state_set = &fiji_set_power_state_tasks,
- .get_sclk = &fiji_dpm_get_sclk,
- .get_mclk = &fiji_dpm_get_mclk,
- .print_current_perforce_level = &fiji_print_current_perforce_level,
- .powergate_uvd = &fiji_phm_powergate_uvd,
- .powergate_vce = &fiji_phm_powergate_vce,
- .disable_clock_power_gating = &fiji_phm_disable_clock_power_gating,
- .notify_smc_display_config_after_ps_adjustment =
- &tonga_notify_smc_display_config_after_ps_adjustment,
- .display_config_changed = &fiji_display_configuration_changed_task,
- .set_max_fan_pwm_output = fiji_set_max_fan_pwm_output,
- .set_max_fan_rpm_output = fiji_set_max_fan_rpm_output,
- .get_temperature = fiji_thermal_get_temperature,
- .stop_thermal_controller = fiji_thermal_stop_thermal_controller,
- .get_fan_speed_info = fiji_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_percent = fiji_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent = fiji_fan_ctrl_set_fan_speed_percent,
- .reset_fan_speed_to_default = fiji_fan_ctrl_reset_fan_speed_to_default,
- .get_fan_speed_rpm = fiji_fan_ctrl_get_fan_speed_rpm,
- .set_fan_speed_rpm = fiji_fan_ctrl_set_fan_speed_rpm,
- .uninitialize_thermal_controller = fiji_thermal_ctrl_uninitialize_thermal_controller,
- .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt,
- .set_fan_control_mode = fiji_set_fan_control_mode,
- .get_fan_control_mode = fiji_get_fan_control_mode,
- .check_states_equal = fiji_check_states_equal,
- .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
- .force_clock_level = fiji_force_clock_level,
- .print_clock_levels = fiji_print_clock_levels,
- .get_sclk_od = fiji_get_sclk_od,
- .set_sclk_od = fiji_set_sclk_od,
- .get_mclk_od = fiji_get_mclk_od,
- .set_mclk_od = fiji_set_mclk_od,
-};
-
-int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
-{
- hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
- hwmgr->pptable_func = &pptable_v1_0_funcs;
- pp_fiji_thermal_initialize(hwmgr);
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
deleted file mode 100644
index bf67c2a92c68..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _FIJI_HWMGR_H_
-#define _FIJI_HWMGR_H_
-
-#include "hwmgr.h"
-#include "smu73.h"
-#include "smu73_discrete.h"
-#include "ppatomctrl.h"
-#include "fiji_ppsmc.h"
-#include "pp_endian.h"
-
-#define FIJI_MAX_HARDWARE_POWERLEVELS 2
-#define FIJI_AT_DFLT 30
-
-#define FIJI_VOLTAGE_CONTROL_NONE 0x0
-#define FIJI_VOLTAGE_CONTROL_BY_GPIO 0x1
-#define FIJI_VOLTAGE_CONTROL_BY_SVID2 0x2
-#define FIJI_VOLTAGE_CONTROL_MERGED 0x3
-
-#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
-#define DPMTABLE_UPDATE_SCLK 0x00000004
-#define DPMTABLE_UPDATE_MCLK 0x00000008
-
-struct fiji_performance_level {
- uint32_t memory_clock;
- uint32_t engine_clock;
- uint16_t pcie_gen;
- uint16_t pcie_lane;
-};
-
-struct fiji_uvd_clocks {
- uint32_t vclk;
- uint32_t dclk;
-};
-
-struct fiji_vce_clocks {
- uint32_t evclk;
- uint32_t ecclk;
-};
-
-struct fiji_power_state {
- uint32_t magic;
- struct fiji_uvd_clocks uvd_clks;
- struct fiji_vce_clocks vce_clks;
- uint32_t sam_clk;
- uint32_t acp_clk;
- uint16_t performance_level_count;
- bool dc_compatible;
- uint32_t sclk_threshold;
- struct fiji_performance_level performance_levels[FIJI_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct fiji_dpm_level {
- bool enabled;
- uint32_t value;
- uint32_t param1;
-};
-
-#define FIJI_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define FIJI_MINIMUM_ENGINE_CLOCK 2500
-
-struct fiji_single_dpm_table {
- uint32_t count;
- struct fiji_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct fiji_dpm_table {
- struct fiji_single_dpm_table sclk_table;
- struct fiji_single_dpm_table mclk_table;
- struct fiji_single_dpm_table pcie_speed_table;
- struct fiji_single_dpm_table vddc_table;
- struct fiji_single_dpm_table vddci_table;
- struct fiji_single_dpm_table mvdd_table;
-};
-
-struct fiji_clock_registers {
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t vDLL_CNTL;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_SS1;
- uint32_t vMPLL_SS2;
-};
-
-struct fiji_voltage_smio_registers {
- uint32_t vS0_VID_LOWER_SMIO_CNTL;
-};
-
-#define FIJI_MAX_LEAKAGE_COUNT 8
-struct fiji_leakage_voltage {
- uint16_t count;
- uint16_t leakage_id[FIJI_MAX_LEAKAGE_COUNT];
- uint16_t actual_voltage[FIJI_MAX_LEAKAGE_COUNT];
-};
-
-struct fiji_vbios_boot_state {
- uint16_t mvdd_bootup_value;
- uint16_t vddc_bootup_value;
- uint16_t vddci_bootup_value;
- uint32_t sclk_bootup_value;
- uint32_t mclk_bootup_value;
- uint16_t pcie_gen_bootup_value;
- uint16_t pcie_lane_bootup_value;
-};
-
-struct fiji_bacos {
- uint32_t best_match;
- uint32_t baco_flags;
- struct fiji_performance_level performance_level;
-};
-
-/* Ultra Low Voltage parameter structure */
-struct fiji_ulv_parm {
- bool ulv_supported;
- uint32_t cg_ulv_parameter;
- uint32_t ulv_volt_change_delay;
- struct fiji_performance_level ulv_power_level;
-};
-
-struct fiji_display_timing {
- uint32_t min_clock_in_sr;
- uint32_t num_existing_displays;
-};
-
-struct fiji_dpmlevel_enable_mask {
- uint32_t uvd_dpm_enable_mask;
- uint32_t vce_dpm_enable_mask;
- uint32_t acp_dpm_enable_mask;
- uint32_t samu_dpm_enable_mask;
- uint32_t sclk_dpm_enable_mask;
- uint32_t mclk_dpm_enable_mask;
- uint32_t pcie_dpm_enable_mask;
-};
-
-struct fiji_pcie_perf_range {
- uint16_t max;
- uint16_t min;
-};
-
-struct fiji_hwmgr {
- struct fiji_dpm_table dpm_table;
- struct fiji_dpm_table golden_dpm_table;
-
- uint32_t voting_rights_clients0;
- uint32_t voting_rights_clients1;
- uint32_t voting_rights_clients2;
- uint32_t voting_rights_clients3;
- uint32_t voting_rights_clients4;
- uint32_t voting_rights_clients5;
- uint32_t voting_rights_clients6;
- uint32_t voting_rights_clients7;
- uint32_t static_screen_threshold_unit;
- uint32_t static_screen_threshold;
- uint32_t voltage_control;
- uint32_t vddc_vddci_delta;
-
- uint32_t active_auto_throttle_sources;
-
- struct fiji_clock_registers clock_registers;
- struct fiji_voltage_smio_registers voltage_smio_registers;
-
- bool is_memory_gddr5;
- uint16_t acpi_vddc;
- bool pspp_notify_required;
- uint16_t force_pcie_gen;
- uint16_t acpi_pcie_gen;
- uint32_t pcie_gen_cap;
- uint32_t pcie_lane_cap;
- uint32_t pcie_spc_cap;
- struct fiji_leakage_voltage vddc_leakage;
- struct fiji_leakage_voltage Vddci_leakage;
-
- uint32_t mvdd_control;
- uint32_t vddc_mask_low;
- uint32_t mvdd_mask_low;
- uint16_t max_vddc_in_pptable;
- uint16_t min_vddc_in_pptable;
- uint16_t max_vddci_in_pptable;
- uint16_t min_vddci_in_pptable;
- uint32_t mclk_strobe_mode_threshold;
- uint32_t mclk_stutter_mode_threshold;
- uint32_t mclk_edc_enable_threshold;
- uint32_t mclk_edcwr_enable_threshold;
- bool is_uvd_enabled;
- struct fiji_vbios_boot_state vbios_boot_state;
-
- bool battery_state;
- bool is_tlu_enabled;
-
- /* ---- SMC SRAM Address of firmware header tables ---- */
- uint32_t sram_end;
- uint32_t dpm_table_start;
- uint32_t soft_regs_start;
- uint32_t mc_reg_table_start;
- uint32_t fan_table_start;
- uint32_t arb_table_start;
- struct SMU73_Discrete_DpmTable smc_state_table;
- struct SMU73_Discrete_Ulv ulv_setting;
-
- /* ---- Stuff originally coming from Evergreen ---- */
- uint32_t vddci_control;
- struct pp_atomctrl_voltage_table vddc_voltage_table;
- struct pp_atomctrl_voltage_table vddci_voltage_table;
- struct pp_atomctrl_voltage_table mvdd_voltage_table;
-
- uint32_t mgcg_cgtt_local2;
- uint32_t mgcg_cgtt_local3;
- uint32_t gpio_debug;
- uint32_t mc_micro_code_feature;
- uint32_t highest_mclk;
- uint16_t acpi_vddci;
- uint8_t mvdd_high_index;
- uint8_t mvdd_low_index;
- bool dll_default_on;
- bool performance_request_registered;
-
- /* ---- Low Power Features ---- */
- struct fiji_bacos bacos;
- struct fiji_ulv_parm ulv;
-
- /* ---- CAC Stuff ---- */
- uint32_t cac_table_start;
- bool cac_configuration_required;
- bool driver_calculate_cac_leakage;
- bool cac_enabled;
-
- /* ---- DPM2 Parameters ---- */
- uint32_t power_containment_features;
- bool enable_dte_feature;
- bool enable_tdc_limit_feature;
- bool enable_pkg_pwr_tracking_feature;
- bool disable_uvd_power_tune_feature;
- const struct fiji_pt_defaults *power_tune_defaults;
- struct SMU73_Discrete_PmFuses power_tune_table;
- uint32_t dte_tj_offset;
- uint32_t fast_watermark_threshold;
-
- /* ---- Phase Shedding ---- */
- bool vddc_phase_shed_control;
-
- /* ---- DI/DT ---- */
- struct fiji_display_timing display_timing;
-
- /* ---- Thermal Temperature Setting ---- */
- struct fiji_dpmlevel_enable_mask dpm_level_enable_mask;
- uint32_t need_update_smu7_dpm_table;
- uint32_t sclk_dpm_key_disabled;
- uint32_t mclk_dpm_key_disabled;
- uint32_t pcie_dpm_key_disabled;
- uint32_t min_engine_clocks;
- struct fiji_pcie_perf_range pcie_gen_performance;
- struct fiji_pcie_perf_range pcie_lane_performance;
- struct fiji_pcie_perf_range pcie_gen_power_saving;
- struct fiji_pcie_perf_range pcie_lane_power_saving;
- bool use_pcie_performance_levels;
- bool use_pcie_power_saving_levels;
- uint32_t activity_target[SMU73_MAX_LEVELS_GRAPHICS];
- uint32_t mclk_activity_target;
- uint32_t mclk_dpm0_activity_target;
- uint32_t low_sclk_interrupt_threshold;
- uint32_t last_mclk_dpm_enable_mask;
- bool uvd_enabled;
-
- /* ---- Power Gating States ---- */
- bool uvd_power_gated;
- bool vce_power_gated;
- bool samu_power_gated;
- bool acp_power_gated;
- bool pg_acp_init;
- bool frtc_enabled;
- bool frtc_status_changed;
-};
-
-/* To convert to Q8.8 format for firmware */
-#define FIJI_Q88_FORMAT_CONVERSION_UNIT 256
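As a quick worked example of the Q8.8 conversion this constant implies (input value invented): a fan gain of 12.5 is sent to the firmware as 12.5 * FIJI_Q88_FORMAT_CONVERSION_UNIT = 3200.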
-
-enum Fiji_I2CLineID {
- Fiji_I2CLineID_DDC1 = 0x90,
- Fiji_I2CLineID_DDC2 = 0x91,
- Fiji_I2CLineID_DDC3 = 0x92,
- Fiji_I2CLineID_DDC4 = 0x93,
- Fiji_I2CLineID_DDC5 = 0x94,
- Fiji_I2CLineID_DDC6 = 0x95,
- Fiji_I2CLineID_SCLSDA = 0x96,
- Fiji_I2CLineID_DDCVGA = 0x97
-};
-
-#define Fiji_I2C_DDC1DATA 0
-#define Fiji_I2C_DDC1CLK 1
-#define Fiji_I2C_DDC2DATA 2
-#define Fiji_I2C_DDC2CLK 3
-#define Fiji_I2C_DDC3DATA 4
-#define Fiji_I2C_DDC3CLK 5
-#define Fiji_I2C_SDA 40
-#define Fiji_I2C_SCL 41
-#define Fiji_I2C_DDC4DATA 65
-#define Fiji_I2C_DDC4CLK 66
-#define Fiji_I2C_DDC5DATA 0x48
-#define Fiji_I2C_DDC5CLK 0x49
-#define Fiji_I2C_DDC6DATA 0x4a
-#define Fiji_I2C_DDC6CLK 0x4b
-#define Fiji_I2C_DDCVGADATA 0x4c
-#define Fiji_I2C_DDCVGACLK 0x4d
-
-#define FIJI_UNUSED_GPIO_PIN 0x7F
-
-extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
-extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr);
-extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display);
-int fiji_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input);
-int fiji_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate);
-int fiji_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-
-#endif /* _FIJI_HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
deleted file mode 100644
index f5992ea0c56f..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ /dev/null
@@ -1,610 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "hwmgr.h"
-#include "smumgr.h"
-#include "fiji_hwmgr.h"
-#include "fiji_powertune.h"
-#include "fiji_smumgr.h"
-#include "smu73_discrete.h"
-#include "pp_debug.h"
-
-#define VOLTAGE_SCALE 4
-#define POWERTUNE_DEFAULT_SET_MAX 1
-
-const struct fiji_pt_defaults fiji_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
- /* SviLoadLineEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc */
- {1, 0xF, 0xFD,
- /* TDC_MAWt, TdcWaterfallCtl, DTEAmbientTempBase */
- 0x19, 5, 45}
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *fiji_hwmgr = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint32_t tmp = 0;
-
- if(table_info &&
- table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX &&
- table_info->cac_dtp_table->usPowerTuneDataSetID)
- fiji_hwmgr->power_tune_defaults =
- &fiji_power_tune_data_set_array
- [table_info->cac_dtp_table->usPowerTuneDataSetID - 1];
- else
- fiji_hwmgr->power_tune_defaults = &fiji_power_tune_data_set_array[0];
-
- /* Assume disabled */
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SQRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DBRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TDRamping);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TCPRamping);
-
- fiji_hwmgr->dte_tj_offset = tmp;
-
- if (!tmp) {
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC);
-
- fiji_hwmgr->fast_watermark_threshold = 100;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- tmp = 1;
- fiji_hwmgr->enable_dte_feature = tmp ? false : true;
- fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
- fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
- }
- }
-}
-
-/* PPGen generates the gain setting in x * 100 units.
- * This function converts it to x * 4096 (0x1000) units,
- * which is the unit expected by the SMC firmware.
- */
-static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
-{
- uint32_t tmp;
- tmp = raw_setting * 4096 / 100;
- return (uint16_t)tmp;
-}
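For instance (input value invented), a PPGen gain of 120, i.e. 1.20 in the x * 100 encoding, becomes 120 * 4096 / 100 = 4915 in the units the SMC expects; the integer division drops the trailing 0.2:

	uint16_t smc_gain = scale_fan_gain_settings(120);	/* 4915 */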
-
-static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t* sda)
-{
- switch (line) {
- case Fiji_I2CLineID_DDC1 :
- *scl = Fiji_I2C_DDC1CLK;
- *sda = Fiji_I2C_DDC1DATA;
- break;
- case Fiji_I2CLineID_DDC2 :
- *scl = Fiji_I2C_DDC2CLK;
- *sda = Fiji_I2C_DDC2DATA;
- break;
- case Fiji_I2CLineID_DDC3 :
- *scl = Fiji_I2C_DDC3CLK;
- *sda = Fiji_I2C_DDC3DATA;
- break;
- case Fiji_I2CLineID_DDC4 :
- *scl = Fiji_I2C_DDC4CLK;
- *sda = Fiji_I2C_DDC4DATA;
- break;
- case Fiji_I2CLineID_DDC5 :
- *scl = Fiji_I2C_DDC5CLK;
- *sda = Fiji_I2C_DDC5DATA;
- break;
- case Fiji_I2CLineID_DDC6 :
- *scl = Fiji_I2C_DDC6CLK;
- *sda = Fiji_I2C_DDC6DATA;
- break;
- case Fiji_I2CLineID_SCLSDA :
- *scl = Fiji_I2C_SCL;
- *sda = Fiji_I2C_SDA;
- break;
- case Fiji_I2CLineID_DDCVGA :
- *scl = Fiji_I2C_DDCVGACLK;
- *sda = Fiji_I2C_DDCVGADATA;
- break;
- default:
- *scl = 0;
- *sda = 0;
- break;
- }
-}
-
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
- SMU73_Discrete_DpmTable *dpm_table = &(data->smc_state_table);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table;
- struct pp_advance_fan_control_parameters *fan_table=
- &hwmgr->thermal_controller.advanceFanControlParameters;
- uint8_t uc_scl, uc_sda;
-
- /* The number of TDP fraction bits is changed from 8 to 7 for Fiji,
- * as requested by the SMC team.
- */
- dpm_table->DefaultTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
- dpm_table->TargetTdp = PP_HOST_TO_SMC_US(
- (uint16_t)(cac_dtp_table->usTDP * 128));
-
- PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255,
- "Target Operating Temp is out of Range!",);
-
- dpm_table->GpuTjMax = (uint8_t)(cac_dtp_table->usTargetOperatingTemp);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = defaults->DTEAmbientTempBase;
-
- /* The following are for new Fiji Multi-input fan/thermal control */
- dpm_table->TemperatureLimitEdge = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTargetOperatingTemp * 256);
- dpm_table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitHotspot * 256);
- dpm_table->TemperatureLimitLiquid1 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid1 * 256);
- dpm_table->TemperatureLimitLiquid2 = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitLiquid2 * 256);
- dpm_table->TemperatureLimitVrVddc = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrVddc * 256);
- dpm_table->TemperatureLimitVrMvdd = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitVrMvdd * 256);
- dpm_table->TemperatureLimitPlx = PP_HOST_TO_SMC_US(
- cac_dtp_table->usTemperatureLimitPlx * 256);
-
- dpm_table->FanGainEdge = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainEdge));
- dpm_table->FanGainHotspot = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHotspot));
- dpm_table->FanGainLiquid = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainLiquid));
- dpm_table->FanGainVrVddc = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrVddc));
- dpm_table->FanGainVrMvdd = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainVrMvdd));
- dpm_table->FanGainPlx = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainPlx));
- dpm_table->FanGainHbm = PP_HOST_TO_SMC_US(
- scale_fan_gain_settings(fan_table->usFanGainHbm));
-
- dpm_table->Liquid1_I2C_address = cac_dtp_table->ucLiquid1_I2C_address;
- dpm_table->Liquid2_I2C_address = cac_dtp_table->ucLiquid2_I2C_address;
- dpm_table->Vr_I2C_address = cac_dtp_table->ucVr_I2C_address;
- dpm_table->Plx_I2C_address = cac_dtp_table->ucPlx_I2C_address;
-
- get_scl_sda_value(cac_dtp_table->ucLiquid_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Liquid_I2C_LineSCL = uc_scl;
- dpm_table->Liquid_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucVr_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Vr_I2C_LineSCL = uc_scl;
- dpm_table->Vr_I2C_LineSDA = uc_sda;
-
- get_scl_sda_value(cac_dtp_table->ucPlx_I2C_Line, &uc_scl, &uc_sda);
- dpm_table->Plx_I2C_LineSCL = uc_scl;
- dpm_table->Plx_I2C_LineSDA = uc_sda;
-
- return 0;
-}
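The scaling constants above differ on purpose: the TDP fields use 7 fractional bits (x128), per the comment, while the temperature limits and fan gains are sent as Q8.8 (x256). Two illustrative conversions with invented board values:

	uint16_t default_tdp = 220 * 128;	/* 220 W -> 28160 */
	uint16_t temp_edge   = 90 * 256;	/* 90 C  -> 23040 */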
-
-static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
- data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
- data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC;
- data->power_tune_table.SviLoadLineTrimVddC = 3;
- data->power_tune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
-{
- uint16_t tdc_limit;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
-
- /* The number of TDC fraction bits is changed from 8 to 7
- * for Fiji, as requested by the SMC team.
- */
- tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128);
- data->power_tune_table.TDC_VDDC_PkgLimit =
- CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
- data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- defaults->TDC_VDDC_ThrottleReleaseLimitPerc;
- data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt;
-
- return 0;
-}
-
-static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- const struct fiji_pt_defaults *defaults = data->power_tune_defaults;
- uint32_t temp;
-
- if (fiji_read_smc_sram_dword(hwmgr->smumgr,
- fuse_table_offset +
- offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
- (uint32_t *)&temp, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!",
- return -EINVAL);
- else {
- data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
- data->power_tune_table.LPMLTemperatureMin =
- (uint8_t)((temp >> 16) & 0xff);
- data->power_tune_table.LPMLTemperatureMax =
- (uint8_t)((temp >> 8) & 0xff);
- data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
- }
- return 0;
-}
-
-static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.LPMLTemperatureScaler[i] = 0;
-
- return 0;
-}
-
-static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if( (hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity & (1 << 15)) ||
- 0 == hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity )
- hwmgr->thermal_controller.advanceFanControlParameters.
- usFanOutputSensitivity = hwmgr->thermal_controller.
- advanceFanControlParameters.usDefaultFanOutputSensitivity;
-
- data->power_tune_table.FuzzyFan_PwmSetDelta =
- PP_HOST_TO_SMC_US(hwmgr->thermal_controller.
- advanceFanControlParameters.usFanOutputSensitivity);
- return 0;
-}
-
-static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- /* Currently not used. Set all to zero. */
- for (i = 0; i < 16; i++)
- data->power_tune_table.GnbLPML[i] = 0;
-
- return 0;
-}
-
-static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- /* int i, min, max;
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint8_t * pHiVID = data->power_tune_table.BapmVddCVidHiSidd;
- uint8_t * pLoVID = data->power_tune_table.BapmVddCVidLoSidd;
-
- min = max = pHiVID[0];
- for (i = 0; i < 8; i++) {
- if (0 != pHiVID[i]) {
- if (min > pHiVID[i])
- min = pHiVID[i];
- if (max < pHiVID[i])
- max = pHiVID[i];
- }
-
- if (0 != pLoVID[i]) {
- if (min > pLoVID[i])
- min = pLoVID[i];
- if (max < pLoVID[i])
- max = pLoVID[i];
- }
- }
-
- PP_ASSERT_WITH_CODE((0 != min) && (0 != max), "BapmVddcVidSidd table does not exist!", return int_Failed);
- data->power_tune_table.GnbLPMLMaxVid = (uint8_t)max;
- data->power_tune_table.GnbLPMLMinVid = (uint8_t)min;
-*/
- return 0;
-}
-
-static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- uint16_t HiSidd = data->power_tune_table.BapmVddCBaseLeakageHiSidd;
- uint16_t LoSidd = data->power_tune_table.BapmVddCBaseLeakageLoSidd;
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
-
- HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
- LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
-
- data->power_tune_table.BapmVddCBaseLeakageHiSidd =
- CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
- data->power_tune_table.BapmVddCBaseLeakageLoSidd =
- CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
-
- return 0;
-}
-
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- uint32_t pm_fuse_table_offset;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (fiji_read_smc_sram_dword(hwmgr->smumgr,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU73_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to get pm_fuse_table_offset Failed!",
- return -EINVAL);
-
- /* DW6 */
- if (fiji_populate_svi_load_line(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate SviLoadLine Failed!",
- return -EINVAL);
- /* DW7 */
- if (fiji_populate_tdc_limit(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TDCLimit Failed!", return -EINVAL);
- /* DW8 */
- if (fiji_populate_dw8(hwmgr, pm_fuse_table_offset))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate TdcWaterfallCtl, "
- "LPMLTemperature Min and Max Failed!",
- return -EINVAL);
-
- /* DW9-DW12 */
- if (0 != fiji_populate_temperature_scaler(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate LPMLTemperatureScaler Failed!",
- return -EINVAL);
-
- /* DW13-DW14 */
- if(fiji_populate_fuzzy_fan(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate Fuzzy Fan Control parameters Failed!",
- return -EINVAL);
-
- /* DW15-DW18 */
- if (fiji_populate_gnb_lpml(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Failed!",
- return -EINVAL);
-
- /* DW19 */
- if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
- /* DW20 */
- if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate BapmVddCBaseLeakage Hi and Lo "
- "Sidd Failed!", return -EINVAL);
-
- if (fiji_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
- (uint8_t *)&data->power_tune_table,
- sizeof(struct SMU73_Discrete_PmFuses), data->sram_end))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to download PmFuseTable Failed!",
- return -EINVAL);
- }
- return 0;
-}
-
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC)) {
- int smc_result;
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_EnableCac));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable CAC in SMC.", result = -1);
-
- data->cac_enabled = (0 == smc_result) ? true : false;
- }
- return result;
-}
-
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC) && data->cac_enabled) {
- int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_DisableCac));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable CAC in SMC.", result = -1);
-
- data->cac_enabled = false;
- }
- return result;
-}
-
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
- if(data->power_containment_features &
- POWERCONTAINMENT_FEATURE_PkgPwrLimit)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_PkgPwrSetLimit, n);
- return 0;
-}
-
-static int fiji_set_overdriver_target_tdp(struct pp_hwmgr *pHwMgr, uint32_t target_tdp)
-{
- return smum_send_msg_to_smc_with_parameter(pHwMgr->smumgr,
- PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
-}
-
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- int smc_result;
- int result = 0;
-
- data->power_containment_features = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- if (data->enable_dte_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_EnableDTE));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable DTE in SMC.", result = -1;);
- if (0 == smc_result)
- data->power_containment_features |= POWERCONTAINMENT_FEATURE_DTE;
- }
-
- if (data->enable_tdc_limit_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_TDCLimitEnable));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable TDCLimit in SMC.", result = -1;);
- if (0 == smc_result)
- data->power_containment_features |=
- POWERCONTAINMENT_FEATURE_TDCLimit;
- }
-
- if (data->enable_pkg_pwr_tracking_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
- PP_ASSERT_WITH_CODE((0 == smc_result),
- "Failed to enable PkgPwrTracking in SMC.", result = -1;);
- if (0 == smc_result) {
- struct phm_cac_tdp_table *cac_table =
- table_info->cac_dtp_table;
- uint32_t default_limit =
- (uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
-
- data->power_containment_features |=
- POWERCONTAINMENT_FEATURE_PkgPwrLimit;
-
- if (fiji_set_power_limit(hwmgr, default_limit))
- printk(KERN_ERR "Failed to set Default Power Limit in SMC!");
- }
- }
- }
- return result;
-}
-
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
-{
- struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment) &&
- data->power_containment_features) {
- int smc_result;
-
- if (data->power_containment_features &
- POWERCONTAINMENT_FEATURE_TDCLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_TDCLimitDisable));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable TDCLimit in SMC.",
- result = smc_result);
- }
-
- if (data->power_containment_features &
- POWERCONTAINMENT_FEATURE_DTE) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_DisableDTE));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable DTE in SMC.",
- result = smc_result);
- }
-
- if (data->power_containment_features &
- POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
- PP_ASSERT_WITH_CODE((smc_result == 0),
- "Failed to disable PkgPwrTracking in SMC.",
- result = smc_result);
- }
- data->power_containment_features = 0;
- }
-
- return result;
-}
-
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table;
- int adjust_percent, target_tdp;
- int result = 0;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
- /* adjustment percentage has already been validated */
- adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
- hwmgr->platform_descriptor.TDPAdjustment :
- (-1 * hwmgr->platform_descriptor.TDPAdjustment);
- /* The SMC requested that target_tdp be a 7-bit fraction in the DPM table,
- * but an 8-bit fraction in messages.
- */
- target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
- result = fiji_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
- }
-
- return result;
-}
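Putting invented numbers to the message-side calculation above: with a 220 W board limit and a +5% adjustment, target_tdp = ((100 + 5) * 220 * 256) / 100 = 59136, i.e. 231 W in the 8-bit-fraction format the SMC message expects.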
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
deleted file mode 100644
index fec772421733..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef FIJI_POWERTUNE_H
-#define FIJI_POWERTUNE_H
-
-enum fiji_pt_config_reg_type {
- FIJI_CONFIGREG_MMR = 0,
- FIJI_CONFIGREG_SMC_IND,
- FIJI_CONFIGREG_DIDT_IND,
- FIJI_CONFIGREG_CACHE,
- FIJI_CONFIGREG_MAX
-};
-
-/* PowerContainment Features */
-#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
-
-#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
-#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
-#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
-#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
-#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
-#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
-#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
-#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
-
-struct fiji_pt_config_reg {
- uint32_t offset;
- uint32_t mask;
- uint32_t shift;
- uint32_t value;
- enum fiji_pt_config_reg_type type;
-};
-
-struct fiji_pt_defaults
-{
- uint8_t SviLoadLineEn;
- uint8_t SviLoadLineVddC;
- uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
- uint8_t TDC_MAWt;
- uint8_t TdcWaterfallCtl;
- uint8_t DTEAmbientTempBase;
-};
-
-void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
-int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
-int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
-int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
-int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
-int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
-int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
-
-#endif /* FIJI_POWERTUNE_H */
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
deleted file mode 100644
index 8621493b8574..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_thermal.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef FIJI_THERMAL_H
-#define FIJI_THERMAL_H
-
-#include "hwmgr.h"
-
-#define FIJI_THERMAL_HIGH_ALERT_MASK 0x1
-#define FIJI_THERMAL_LOW_ALERT_MASK 0x2
-
-#define FIJI_THERMAL_MINIMUM_TEMP_READING -256
-#define FIJI_THERMAL_MAXIMUM_TEMP_READING 255
-
-#define FIJI_THERMAL_MINIMUM_ALERT_TEMP 0
-#define FIJI_THERMAL_MAXIMUM_ALERT_TEMP 255
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-
-extern int tf_fiji_thermal_initialize(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-extern int tf_fiji_thermal_enable_alert(struct pp_hwmgr *hwmgr, void *input, void *output, void *storage, int result);
-
-extern int fiji_thermal_get_temperature(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
-extern int fiji_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
-extern int fiji_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_fiji_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern int fiji_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
-extern int fiji_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
-extern int fiji_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
-extern int fiji_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t tonga_get_xclk(struct pp_hwmgr *hwmgr);
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 524d0dd4f0e9..1167205057b3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -36,13 +36,13 @@
#include "amd_acpi.h"
extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
-extern int iceland_hwmgr_init(struct pp_hwmgr *hwmgr);
+static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
+static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
uint8_t convert_to_vid(uint16_t vddc)
{
@@ -79,21 +79,32 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
- iceland_hwmgr_init(hwmgr);
+ topaz_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+ PP_VBI_TIME_SUPPORT_MASK |
+ PP_ENABLE_GFX_CG_THRU_SMU);
+ hwmgr->pp_table_version = PP_TABLE_V0;
break;
case CHIP_TONGA:
- tonga_hwmgr_init(hwmgr);
+ tonga_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+ PP_VBI_TIME_SUPPORT_MASK);
break;
case CHIP_FIJI:
- fiji_hwmgr_init(hwmgr);
+ fiji_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_SMC_VOLTAGE_CONTROL_MASK |
+ PP_VBI_TIME_SUPPORT_MASK |
+ PP_ENABLE_GFX_CG_THRU_SMU);
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
- polaris10_hwmgr_init(hwmgr);
+ polaris_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
default:
return -EINVAL;
}
+ smu7_hwmgr_init(hwmgr);
break;
default:
return -EINVAL;
@@ -388,12 +399,9 @@ int phm_reset_single_dpm_table(void *table,
struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;
- PP_ASSERT_WITH_CODE(count <= max,
- "Fatal error, can not set up single DPM table entries to exceed max number!",
- );
+ dpm_table->count = count > max ? max : count;
- dpm_table->count = count;
- for (i = 0; i < max; i++)
+ for (i = 0; i < dpm_table->count; i++)
dpm_table->dpm_level[i].enabled = false;
return 0;
@@ -713,3 +721,95 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
return ret;
}
+int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ /* power tune caps Assume disabled */
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ if (hwmgr->chip_id == CHIP_POLARIS11)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SPLLShutdownSupport);
+ return 0;
+}
+
+int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ return 0;
+}
+
+int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+
+ return 0;
+}
+
+int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TablelessHardwareInterface);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_CAC);
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c
deleted file mode 100644
index 47949f5cd073..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Author: Huang Rui <ray.huang@amd.com>
- *
- */
-
-#include "hwmgr.h"
-#include "iceland_clockpowergating.h"
-#include "ppsmc.h"
-#include "iceland_hwmgr.h"
-
-int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
-{
- /* iceland does not have MM hardware block */
- return 0;
-}
-
-static int iceland_phm_powerup_uvd(struct pp_hwmgr *hwmgr)
-{
- /* iceland does not have MM hardware block */
- return 0;
-}
-
-static int iceland_phm_powerdown_vce(struct pp_hwmgr *hwmgr)
-{
- /* iceland does not have MM hardware block */
- return 0;
-}
-
-static int iceland_phm_powerup_vce(struct pp_hwmgr *hwmgr)
-{
- /* iceland does not have MM hardware block */
- return 0;
-}
-
-int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum
- PHM_AsicBlock block, enum PHM_ClockGateSetting gating)
-{
- int ret = 0;
-
- switch (block) {
- case PHM_AsicBlock_UVD_MVC:
- case PHM_AsicBlock_UVD:
- case PHM_AsicBlock_UVD_HD:
- case PHM_AsicBlock_UVD_SD:
- if (gating == PHM_ClockGateSetting_StaticOff)
- ret = iceland_phm_powerdown_uvd(hwmgr);
- else
- ret = iceland_phm_powerup_uvd(hwmgr);
- break;
- case PHM_AsicBlock_GFX:
- default:
- break;
- }
-
- return ret;
-}
-
-int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
-{
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
-
- data->uvd_power_gated = false;
- data->vce_power_gated = false;
-
- iceland_phm_powerup_uvd(hwmgr);
- iceland_phm_powerup_vce(hwmgr);
-
- return 0;
-}
-
-int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
-{
- if (bgate) {
- iceland_update_uvd_dpm(hwmgr, true);
- iceland_phm_powerdown_uvd(hwmgr);
- } else {
- iceland_phm_powerup_uvd(hwmgr);
- iceland_update_uvd_dpm(hwmgr, false);
- }
-
- return 0;
-}
-
-int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
-{
- if (bgate)
- return iceland_phm_powerdown_vce(hwmgr);
- else
- return iceland_phm_powerup_vce(hwmgr);
-
- return 0;
-}
-
-int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr,
- const uint32_t *msg_id)
-{
- /* iceland does not have MM hardware block */
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h
deleted file mode 100644
index ff5ef00c7c68..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_clockpowergating.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Author: Huang Rui <ray.huang@amd.com>
- *
- */
-
-#ifndef _ICELAND_CLOCK_POWER_GATING_H_
-#define _ICELAND_CLOCK_POWER_GATING_H_
-
-#include "iceland_hwmgr.h"
-#include "pp_asicblocks.h"
-
-extern int iceland_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern int iceland_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int iceland_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int iceland_phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
-extern int iceland_phm_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
-extern int iceland_phm_update_clock_gatings(struct pp_hwmgr *hwmgr, const uint32_t *msg_id);
-#endif /* _ICELAND_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
deleted file mode 100644
index a7b4bc6caea2..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_dyn_defaults.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef ICELAND_DYN_DEFAULTS_H
-#define ICELAND_DYN_DEFAULTS_H
-
-enum ICELANDdpm_TrendDetection
-{
- ICELANDdpm_TrendDetection_AUTO,
- ICELANDdpm_TrendDetection_UP,
- ICELANDdpm_TrendDetection_DOWN
-};
-typedef enum ICELANDdpm_TrendDetection ICELANDdpm_TrendDetection;
-
-
-#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102
-#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1 0x000400
-#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2 0xC00080
-#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3 0xC00200
-#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4 0xC01680
-#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5 0xC00033
-#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6 0xC00033
-#define PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7 0x3FFFC000
-
-
-#define PPICELAND_THERMALPROTECTCOUNTER_DFLT 0x200
-
-#define PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT 0
-
-#define PPICELAND_STATICSCREENTHRESHOLD_DFLT 0x00C8
-
-#define PPICELAND_GFXIDLECLOCKSTOPTHRESHOLD_DFLT 0x200
-
-#define PPICELAND_REFERENCEDIVIDER_DFLT 4
-
-#define PPICELAND_ULVVOLTAGECHANGEDELAY_DFLT 1687
-
-#define PPICELAND_CGULVPARAMETER_DFLT 0x00040035
-#define PPICELAND_CGULVCONTROL_DFLT 0x00007450
-#define PPICELAND_TARGETACTIVITY_DFLT 30
-#define PPICELAND_MCLK_TARGETACTIVITY_DFLT 10
-
-#endif
-
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
deleted file mode 100644
index 5abe43360ec0..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/iceland_hwmgr.c
+++ /dev/null
@@ -1,5684 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Author: Huang Rui <ray.huang@amd.com>
- *
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include "linux/delay.h"
-#include "pp_acpi.h"
-#include "hwmgr.h"
-#include <atombios.h>
-#include "iceland_hwmgr.h"
-#include "pptable.h"
-#include "processpptables.h"
-#include "pp_debug.h"
-#include "ppsmc.h"
-#include "cgs_common.h"
-#include "pppcielanes.h"
-#include "iceland_dyn_defaults.h"
-#include "smumgr.h"
-#include "iceland_smumgr.h"
-#include "iceland_clockpowergating.h"
-#include "iceland_thermal.h"
-#include "iceland_powertune.h"
-
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
-#include "smu/smu_7_1_1_d.h"
-#include "smu/smu_7_1_1_sh_mask.h"
-
-#include "cgs_linux.h"
-#include "eventmgr.h"
-#include "amd_pcie_helpers.h"
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-#define MC_CG_SEQ_DRAMCONF_S0 0x05
-#define MC_CG_SEQ_DRAMCONF_S1 0x06
-#define MC_CG_SEQ_YCLK_SUSPEND 0x04
-#define MC_CG_SEQ_YCLK_RESUME 0x0a
-
-#define PCIE_BUS_CLK 10000
-#define TCLK (PCIE_BUS_CLK / 10)
-
-#define SMC_RAM_END 0x40000
-#define SMC_CG_IND_START 0xc0030000
-#define SMC_CG_IND_END 0xc0040000 /* First byte after SMC_CG_IND*/
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-const uint32_t iceland_magic = (uint32_t)(PHM_VIslands_Magic);
-
-#define MC_SEQ_MISC0_GDDR5_SHIFT 28
-#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
-#define MC_SEQ_MISC0_GDDR5_VALUE 5
-
-/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
-enum DPM_EVENT_SRC {
- DPM_EVENT_SRC_ANALOG = 0, /* Internal analog trip point */
- DPM_EVENT_SRC_EXTERNAL = 1, /* External (GPIO 17) signal */
- DPM_EVENT_SRC_DIGITAL = 2, /* Internal digital trip point (DIG_THERM_DPM) */
- DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, /* Internal analog or external */
- DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4 /* Internal digital or external */
-};
-
-static int iceland_read_clock_registers(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- data->clock_registers.vCG_SPLL_FUNC_CNTL =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_3 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
- data->clock_registers.vCG_SPLL_FUNC_CNTL_4 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
- data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
- cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
- data->clock_registers.vDLL_CNTL =
- cgs_read_register(hwmgr->device, mmDLL_CNTL);
- data->clock_registers.vMCLK_PWRMGT_CNTL =
- cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
- data->clock_registers.vMPLL_AD_FUNC_CNTL =
- cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
- data->clock_registers.vMPLL_DQ_FUNC_CNTL =
- cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
- data->clock_registers.vMPLL_FUNC_CNTL =
- cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
- data->clock_registers.vMPLL_FUNC_CNTL_1 =
- cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
- data->clock_registers.vMPLL_FUNC_CNTL_2 =
- cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
- data->clock_registers.vMPLL_SS1 =
- cgs_read_register(hwmgr->device, mmMPLL_SS1);
- data->clock_registers.vMPLL_SS2 =
- cgs_read_register(hwmgr->device, mmMPLL_SS2);
-
- return 0;
-}
-
-/**
- * Find out if memory is GDDR5.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_get_memory_type(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- uint32_t temp;
-
- temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0);
-
- data->is_memory_GDDR5 = (MC_SEQ_MISC0_GDDR5_VALUE ==
- ((temp & MC_SEQ_MISC0_GDDR5_MASK) >>
- MC_SEQ_MISC0_GDDR5_SHIFT));
-
- return 0;
-}
-
-int iceland_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
-{
- /* iceland does not have MM hardware blocks */
- return 0;
-}
-
-/**
- * Enables Dynamic Power Management by SMC
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, STATIC_PM_EN, 1);
-
- return 0;
-}
-
-/**
- * Find the MC microcode version and store it in the HwMgr struct
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
-{
- cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-
- hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
-
- return 0;
-}
-
-static int iceland_init_sclk_threshold(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- data->low_sclk_interrupt_threshold = 0;
-
- return 0;
-}
-
-
-static int iceland_setup_asic_task(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- tmp_result = iceland_read_clock_registers(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to read clock registers!", result = tmp_result);
-
- tmp_result = iceland_get_memory_type(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get memory type!", result = tmp_result);
-
- tmp_result = iceland_enable_acpi_power_management(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable ACPI power management!", result = tmp_result);
-
- tmp_result = iceland_get_mc_microcode_version(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to get MC microcode version!", result = tmp_result);
-
- tmp_result = iceland_init_sclk_threshold(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to init sclk threshold!", result = tmp_result);
-
- return result;
-}
-
-static bool cf_iceland_voltage_control(struct pp_hwmgr *hwmgr)
-{
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
-
- return ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control;
-}
-
-/*
- * -------------- Voltage Tables ----------------------
- * If the voltage table is bigger than what fits into the
- * state table on the SMC, keep only the higher entries.
- */
-
-static void iceland_trim_voltage_table_to_fit_state_table(
- struct pp_hwmgr *hwmgr,
- uint32_t max_voltage_steps,
- pp_atomctrl_voltage_table *voltage_table)
-{
- unsigned int i, diff;
-
- if (voltage_table->count <= max_voltage_steps) {
- return;
- }
-
- diff = voltage_table->count - max_voltage_steps;
-
- for (i = 0; i < max_voltage_steps; i++) {
- voltage_table->entries[i] = voltage_table->entries[i + diff];
- }
-
- voltage_table->count = max_voltage_steps;
-
- return;
-}
-
-/**
- * Enable voltage control
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_enable_voltage_control(struct pp_hwmgr *hwmgr)
-{
- /* enable voltage control */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);
-
- return 0;
-}
-
-static int iceland_get_svi2_voltage_table(struct pp_hwmgr *hwmgr,
- struct phm_clock_voltage_dependency_table *voltage_dependency_table,
- pp_atomctrl_voltage_table *voltage_table)
-{
- uint32_t i;
-
- PP_ASSERT_WITH_CODE((NULL != voltage_table),
- "Voltage Dependency Table empty.", return -EINVAL;);
-
- voltage_table->mask_low = 0;
- voltage_table->phase_delay = 0;
- voltage_table->count = voltage_dependency_table->count;
-
- for (i = 0; i < voltage_dependency_table->count; i++) {
- voltage_table->entries[i].value =
- voltage_dependency_table->entries[i].v;
- voltage_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-/**
- * Create Voltage Tables.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_construct_voltage_tables(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- int result;
-
- /* GPIO voltage */
- if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
- &data->vddc_voltage_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve VDDC table.", return result;);
- } else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
- /* SVI2 VDDC voltage */
- result = iceland_get_svi2_voltage_table(hwmgr,
- hwmgr->dyn_state.vddc_dependency_on_mclk,
- &data->vddc_voltage_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->vddc_voltage_table.count <= (SMU71_MAX_LEVELS_VDDC)),
- "Too many voltage values for VDDC. Trimming to fit state table.",
- iceland_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU71_MAX_LEVELS_VDDC, &(data->vddc_voltage_table));
- );
-
- /* GPIO */
- if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT, &(data->vddci_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve VDDCI table.", return result;);
- }
-
- /* SVI2 VDDCI voltage */
- if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control) {
- result = iceland_get_svi2_voltage_table(hwmgr,
- hwmgr->dyn_state.vddci_dependency_on_mclk,
- &data->vddci_voltage_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.", return result;);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->vddci_voltage_table.count <= (SMU71_MAX_LEVELS_VDDCI)),
- "Too many voltage values for VDDCI. Trimming to fit state table.",
- iceland_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU71_MAX_LEVELS_VDDCI, &(data->vddci_voltage_table));
- );
-
-
- /* GPIO */
- if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
- result = atomctrl_get_voltage_table_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT, &(data->mvdd_voltage_table));
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve table.", return result;);
- }
-
- /* SVI2 voltage control */
- if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
- result = iceland_get_svi2_voltage_table(hwmgr,
- hwmgr->dyn_state.mvdd_dependency_on_mclk,
- &data->mvdd_voltage_table);
- PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 MVDD table from dependancy table.", return result;);
- }
-
- PP_ASSERT_WITH_CODE(
- (data->mvdd_voltage_table.count <= (SMU71_MAX_LEVELS_MVDD)),
- "Too many voltage values for MVDD. Trimming to fit state table.",
- iceland_trim_voltage_table_to_fit_state_table(hwmgr,
- SMU71_MAX_LEVELS_MVDD, &(data->mvdd_voltage_table));
- );
-
- return 0;
-}
-
-/*---------------------------MC----------------------------*/
-
-uint8_t iceland_get_memory_module_index(struct pp_hwmgr *hwmgr)
-{
- return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
-}
-
-bool iceland_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
-{
- bool result = true;
-
- switch (inReg) {
- case mmMC_SEQ_RAS_TIMING:
- *outReg = mmMC_SEQ_RAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_DLL_STBY:
- *outReg = mmMC_SEQ_DLL_STBY_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD0:
- *outReg = mmMC_SEQ_G5PDX_CMD0_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CMD1:
- *outReg = mmMC_SEQ_G5PDX_CMD1_LP;
- break;
-
- case mmMC_SEQ_G5PDX_CTRL:
- *outReg = mmMC_SEQ_G5PDX_CTRL_LP;
- break;
-
- case mmMC_SEQ_CAS_TIMING:
- *outReg = mmMC_SEQ_CAS_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING:
- *outReg = mmMC_SEQ_MISC_TIMING_LP;
- break;
-
- case mmMC_SEQ_MISC_TIMING2:
- *outReg = mmMC_SEQ_MISC_TIMING2_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CMD:
- *outReg = mmMC_SEQ_PMG_DVS_CMD_LP;
- break;
-
- case mmMC_SEQ_PMG_DVS_CTL:
- *outReg = mmMC_SEQ_PMG_DVS_CTL_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D0:
- *outReg = mmMC_SEQ_RD_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_RD_CTL_D1:
- *outReg = mmMC_SEQ_RD_CTL_D1_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D0:
- *outReg = mmMC_SEQ_WR_CTL_D0_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_D1:
- *outReg = mmMC_SEQ_WR_CTL_D1_LP;
- break;
-
- case mmMC_PMG_CMD_EMRS:
- *outReg = mmMC_SEQ_PMG_CMD_EMRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS:
- *outReg = mmMC_SEQ_PMG_CMD_MRS_LP;
- break;
-
- case mmMC_PMG_CMD_MRS1:
- *outReg = mmMC_SEQ_PMG_CMD_MRS1_LP;
- break;
-
- case mmMC_SEQ_PMG_TIMING:
- *outReg = mmMC_SEQ_PMG_TIMING_LP;
- break;
-
- case mmMC_PMG_CMD_MRS2:
- *outReg = mmMC_SEQ_PMG_CMD_MRS2_LP;
- break;
-
- case mmMC_SEQ_WR_CTL_2:
- *outReg = mmMC_SEQ_WR_CTL_2_LP;
- break;
-
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-int iceland_set_s0_mc_reg_index(phw_iceland_mc_reg_table *table)
-{
- uint32_t i;
- uint16_t address;
-
- for (i = 0; i < table->last; i++) {
- table->mc_reg_address[i].s0 =
- iceland_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
- ? address : table->mc_reg_address[i].s1;
- }
- return 0;
-}
-
-int iceland_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table, phw_iceland_mc_reg_table *ni_table)
-{
- uint8_t i, j;
-
- PP_ASSERT_WITH_CODE((table->last <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
- "Invalid VramInfo table.", return -1);
-
- for (i = 0; i < table->last; i++) {
- ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
- }
- ni_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- ni_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++) {
- ni_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- }
-
- ni_table->num_entries = table->num_entries;
-
- return 0;
-}
-
-/**
- * VBIOS omits some information to reduce size; we need to recover it here.
- * 1. when we see mmMC_SEQ_MISC1, bit[31:16] EMRS1 needs to be written to mmMC_PMG_CMD_EMRS/_LP[15:0],
- *    and bit[15:0] MRS needs to be written to mmMC_PMG_CMD_MRS/_LP[15:0].
- * 2. when we see mmMC_SEQ_RESERVE_M, bit[15:0] EMRS2 needs to be written to mmMC_PMG_CMD_MRS1/_LP[15:0].
- * 3. these values need to be set for each clock range.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @param table the address of MCRegTable
- * @return always 0
- */
-static int iceland_set_mc_special_registers(struct pp_hwmgr *hwmgr, phw_iceland_mc_reg_table *table)
-{
- uint8_t i, j, k;
- uint32_t temp_reg;
- const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
-
- for (i = 0, j = table->last; i < table->last; i++) {
- PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- switch (table->mc_reg_address[i].s1) {
- /*
- * mmMC_SEQ_MISC1, bit[31:16] EMRS1, needs to be written
- * to mmMC_PMG_CMD_EMRS/_LP[15:0]. Bit[15:0] MRS needs
- * to be written to mmMC_PMG_CMD_MRS/_LP[15:0]
- */
- case mmMC_SEQ_MISC1:
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) |
- ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- }
- j++;
- PP_ASSERT_WITH_CODE((j < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
-
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-
- if (!data->is_memory_GDDR5) {
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
-
- if (!data->is_memory_GDDR5) {
- table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- }
-
- break;
-
- case mmMC_SEQ_RESERVE_M:
- temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) |
- (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- }
- j++;
- PP_ASSERT_WITH_CODE((j <= SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE),
- "Invalid VramInfo table.", return -1);
- break;
-
- default:
- break;
- }
-
- }
-
- table->last = j;
-
- return 0;
-}
-
-
-static int iceland_set_valid_flag(phw_iceland_mc_reg_table *table)
-{
- uint8_t i, j;
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] !=
- table->mc_reg_table_entry[j].mc_data[i]) {
- table->validflag |= (1<<i);
- break;
- }
- }
- }
-
- return 0;
-}
-
-static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- pp_atomctrl_mc_reg_table *table;
- phw_iceland_mc_reg_table *ni_table = &data->iceland_mc_reg_table;
- uint8_t module_index = iceland_get_memory_module_index(hwmgr);
-
- table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
-
- if (NULL == table)
- return -ENOMEM;
-
- /* Program additional LP registers that are no longer programmed by VBIOS */
- cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
- cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
- cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
- cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
-
- memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
-
- result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
-
- if (0 == result)
- result = iceland_copy_vbios_smc_reg_table(table, ni_table);
-
- if (0 == result) {
- iceland_set_s0_mc_reg_index(ni_table);
- result = iceland_set_mc_special_registers(hwmgr, ni_table);
- }
-
- if (0 == result)
- iceland_set_valid_flag(ni_table);
-
- kfree(table);
- return result;
-}
-
-/**
- * Programs static screen detection parameters
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_program_static_screen_threshold_parameters(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- /* Set static screen threshold unit*/
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
- data->static_screen_threshold_unit);
- /* Set static screen threshold*/
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
- data->static_screen_threshold);
-
- return 0;
-}
-
-/**
- * Setup display gap for glitch free memory clock switching.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_enable_display_gap(struct pp_hwmgr *hwmgr)
-{
- uint32_t display_gap = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
-
- display_gap = PHM_SET_FIELD(display_gap,
- CG_DISPLAY_GAP_CNTL, DISP_GAP, DISPLAY_GAP_IGNORE);
-
- display_gap = PHM_SET_FIELD(display_gap,
- CG_DISPLAY_GAP_CNTL, DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_DISPLAY_GAP_CNTL, display_gap);
-
- return 0;
-}
-
-/**
- * Programs activity state transition voting clients
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-int iceland_program_voting_clients(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- /* Clear reset for voting clients before enabling DPM */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
- return 0;
-}
-
-static int iceland_upload_firmware(struct pp_hwmgr *hwmgr)
-{
- int ret = 0;
-
- if (!iceland_is_smc_ram_running(hwmgr->smumgr))
- ret = iceland_smu_upload_firmware_image(hwmgr->smumgr);
-
- return ret;
-}
-
-/**
- * Get the location of various tables inside the FW image.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- */
-static int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- uint32_t tmp;
- int result;
- bool error = 0;
-
- result = iceland_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, DpmTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->dpm_table_start = tmp;
- }
-
- error |= (0 != result);
-
- result = iceland_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, SoftRegisters),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->soft_regs_start = tmp;
- }
-
- error |= (0 != result);
-
-
- result = iceland_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, mcRegisterTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->mc_reg_table_start = tmp;
- }
-
- result = iceland_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, FanTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->fan_table_start = tmp;
- }
-
- error |= (0 != result);
-
- result = iceland_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->arb_table_start = tmp;
- }
-
- error |= (0 != result);
-
-
- result = iceland_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, Version),
- &tmp, data->sram_end);
-
- if (0 == result) {
- hwmgr->microcode_version_info.SMC = tmp;
- }
-
- error |= (0 != result);
-
- result = iceland_read_smc_sram_dword(hwmgr->smumgr,
- SMU71_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU71_Firmware_Header, UlvSettings),
- &tmp, data->sram_end);
-
- if (0 == result) {
- data->ulv_settings_start = tmp;
- }
-
- error |= (0 != result);
-
- return error ? 1 : 0;
-}
-
-/*
-* Copy one arb setting to another and then switch the active set.
-* arbFreqSrc and arbFreqDest are MC_CG_ARB_FREQ_Fx constants.
-*/
-int iceland_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
- uint32_t arbFreqSrc, uint32_t arbFreqDest)
-{
- uint32_t mc_arb_dram_timing;
- uint32_t mc_arb_dram_timing2;
- uint32_t burst_time;
- uint32_t mc_cg_config;
-
- switch (arbFreqSrc) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
- break;
-
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
- burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
- break;
-
- default:
- return -1;
- }
-
- switch (arbFreqDest) {
- case MC_CG_ARB_FREQ_F0:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
- break;
-
- case MC_CG_ARB_FREQ_F1:
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
- break;
-
- default:
- return -1;
- }
-
- mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
- mc_cg_config |= 0x0000000F;
- cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
- PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arbFreqDest);
-
- return 0;
-}
-
-/**
- * Initial switch from ARB F0->F1
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return always 0
- * This function is to be called from the SetPowerState table.
- */
-int iceland_initial_switch_from_arb_f0_to_f1(struct pp_hwmgr *hwmgr)
-{
- return iceland_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-/* ---------------------------------------- ULV related functions ----------------------------------------------------*/
-
-
-static int iceland_reset_single_dpm_table(
- struct pp_hwmgr *hwmgr,
- struct iceland_single_dpm_table *dpm_table,
- uint32_t count)
-{
- uint32_t i;
- if (!(count <= MAX_REGULAR_DPM_NUMBER))
- printk(KERN_ERR "[ powerplay ] Fatal error, can not set up single DPM \
- table entries to exceed max number! \n");
-
- dpm_table->count = count;
- for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
- dpm_table->dpm_levels[i].enabled = 0;
- }
-
- return 0;
-}
-
-static void iceland_setup_pcie_table_entry(
- struct iceland_single_dpm_table *dpm_table,
- uint32_t index, uint32_t pcie_gen,
- uint32_t pcie_lanes)
-{
- dpm_table->dpm_levels[index].value = pcie_gen;
- dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = 1;
-}
-
-/*
- * Set up the PCIe DPM table as follows:
- *
- * A = Performance State, Max, Gen Speed
- * C = Performance State, Min, Gen Speed
- * 1 = Performance State, Max, Lane #
- * 3 = Performance State, Min, Lane #
- *
- * B = Power Saving State, Max, Gen Speed
- * D = Power Saving State, Min, Gen Speed
- * 2 = Power Saving State, Max, Lane #
- * 4 = Power Saving State, Min, Lane #
- *
- *
- * DPM Index Gen Speed Lane #
- * 5 A 1
- * 4 B 2
- * 3 C 1
- * 2 D 2
- * 1 C 3
- * 0 D 4
- *
- */
-static int iceland_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
- data->use_pcie_power_saving_levels),
- "No pcie performance levels!", return -EINVAL);
-
- if (data->use_pcie_performance_levels && !data->use_pcie_power_saving_levels) {
- data->pcie_gen_power_saving = data->pcie_gen_performance;
- data->pcie_lane_power_saving = data->pcie_lane_performance;
- } else if (!data->use_pcie_performance_levels && data->use_pcie_power_saving_levels) {
- data->pcie_gen_performance = data->pcie_gen_power_saving;
- data->pcie_lane_performance = data->pcie_lane_power_saving;
- }
-
- iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.pcie_speed_table, SMU71_MAX_LEVELS_LINK);
-
- /* Hardcode Pcie Table */
- iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Min_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- iceland_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
- get_pcie_gen_support(data->pcie_gen_cap, PP_Max_PCIEGen),
- get_pcie_lane_support(data->pcie_lane_cap, PP_Max_PCIELane));
- data->dpm_table.pcie_speed_table.count = 6;
-
- return 0;
-
-}
-
-
-/*
- * This function initializes all DPM state tables for SMU7 based on the dependency tables.
- * The dynamic state patching function will then trim these state tables to the allowed range
- * based on the power policy or external client requests, such as UVD requests.
- */
-static int iceland_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- uint32_t i;
-
- struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
- struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
- hwmgr->dyn_state.vddc_dependency_on_mclk;
- struct phm_cac_leakage_table *std_voltage_table =
- hwmgr->dyn_state.cac_leakage_table;
-
- PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory", return -1);
- PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
- "SCLK dependency table has to have is missing. This table is mandatory", return -1);
-
- PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory", return -1);
- PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
- "VMCLK dependency table has to have is missing. This table is mandatory", return -1);
-
- /* clear the state table to reset everything to default */
- memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
- iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.sclk_table, SMU71_MAX_LEVELS_GRAPHICS);
- iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mclk_table, SMU71_MAX_LEVELS_MEMORY);
- iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vddc_table, SMU71_MAX_LEVELS_VDDC);
- iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.vdd_ci_table, SMU71_MAX_LEVELS_VDDCI);
- iceland_reset_single_dpm_table(hwmgr, &data->dpm_table.mvdd_table, SMU71_MAX_LEVELS_MVDD);
-
- PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
- "SCLK dependency table is missing. This table is mandatory", return -1);
- /* Initialize Sclk DPM table based on allow Sclk values*/
- data->dpm_table.sclk_table.count = 0;
-
- for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
- if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
- allowed_vdd_sclk_table->entries[i].clk) {
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
- allowed_vdd_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
- data->dpm_table.sclk_table.count++;
- }
- }
-
- PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
- "MCLK dependency table is missing. This table is mandatory", return -1);
- /* Initialize Mclk DPM table based on allow Mclk values */
- data->dpm_table.mclk_table.count = 0;
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
- allowed_vdd_mclk_table->entries[i].clk) {
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
- allowed_vdd_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
- data->dpm_table.mclk_table.count++;
- }
- }
-
- /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */
- for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
- data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
- data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
- /* param1 is for corresponding std voltage */
- data->dpm_table.vddc_table.dpm_levels[i].enabled = 1;
- }
-
- data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
- allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
-
- if (NULL != allowed_vdd_mclk_table) {
- /* Initialize Vddci DPM table based on allow Mclk values */
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
- data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count;
- }
-
- allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
-
- if (NULL != allowed_vdd_mclk_table) {
- /*
- * Initialize MVDD DPM table based on allow Mclk
- * values
- */
- for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
- data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
- data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1;
- }
- data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
- }
-
- /* setup PCIE gen speed levels*/
- iceland_setup_default_pcie_tables(hwmgr);
-
- /* save a copy of the default DPM table*/
- memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct iceland_dpm_table));
-
- return 0;
-}
-
-/**
- * @brief iceland_get_voltage_index
- * Returns the index of the requested voltage record in the lookup table.
- * @param look_up_table - lookup list to search in
- * @param voltage - voltage to look for
- * @return index of the first entry whose voltage is equal to or greater than
- * the requested voltage, or the last index if no such entry exists
- */
-uint8_t iceland_get_voltage_index(phm_ppt_v1_voltage_lookup_table *look_up_table,
- uint16_t voltage)
-{
- uint8_t count = (uint8_t) (look_up_table->count);
- uint8_t i;
-
- PP_ASSERT_WITH_CODE((NULL != look_up_table), "Lookup Table empty.", return 0;);
- PP_ASSERT_WITH_CODE((0 != count), "Lookup Table empty.", return 0;);
-
- for (i = 0; i < count; i++) {
- /* find first voltage equal or bigger than requested */
- if (look_up_table->entries[i].us_vdd >= voltage)
- return i;
- }
-
- /* voltage is bigger than max voltage in the table */
- return i-1;
-}
-
-
-static int iceland_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
- pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
- uint16_t *lo)
-{
- uint16_t v_index;
- bool vol_found = false;
- *hi = tab->value * VOLTAGE_SCALE;
- *lo = tab->value * VOLTAGE_SCALE;
-
- /* SCLK/VDDC Dependency Table has to exist. */
- PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
- "The SCLK/VDDC Dependency Table does not exist.\n",
- return -EINVAL);
-
- if (NULL == hwmgr->dyn_state.cac_leakage_table) {
- pr_warning("CAC Leakage Table does not exist, using vddc.\n");
- return 0;
- }
-
- /*
- * Since voltage in the sclk/vddc dependency table is not
- * necessarily in ascending order because of ELB voltage
- * patching, loop through entire list to find exact voltage.
- */
- for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
- if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
- vol_found = true;
- if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
- *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
- *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
- } else {
- pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
- *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
- *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
- }
- break;
- }
- }
-
- /*
- * If voltage is not found in the first pass, loop again to
- * find the best match, equal or higher value.
- */
- if (!vol_found) {
- for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
- if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
- vol_found = true;
- if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
- *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
- *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
- } else {
- pr_warning("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second look up, using maximum index from CAC table.");
- *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
- *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
- }
- break;
- }
- }
-
- if (!vol_found)
- pr_warning("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
- }
-
- return 0;
-}
-
-static int iceland_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
- pp_atomctrl_voltage_table_entry *tab,
- SMU71_Discrete_VoltageLevel *smc_voltage_tab) {
- int result;
-
-
- result = iceland_get_std_voltage_value_sidd(hwmgr, tab,
- &smc_voltage_tab->StdVoltageHiSidd,
- &smc_voltage_tab->StdVoltageLoSidd);
- if (0 != result) {
- smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
- smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
- }
-
- smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
- CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
- CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
-
- return 0;
-}
-
-/**
- * Vddc table preparation for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return always 0
- */
-static int iceland_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- unsigned int count;
- int result;
-
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- table->VddcLevelCount = data->vddc_voltage_table.count;
- for (count = 0; count < table->VddcLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
- &data->vddc_voltage_table.entries[count],
- &table->VddcLevel[count]);
-		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC VDDC voltage table", return -EINVAL);
-
- /* GPIO voltage control */
- if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
- table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
- else if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
- table->VddcLevel[count].Smio = 0;
- }
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
-
- return 0;
-}
-
-/**
- * Vddci table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int iceland_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- int result;
- uint32_t count;
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- table->VddciLevelCount = data->vddci_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
- &data->vddci_voltage_table.entries[count],
- &table->VddciLevel[count]);
-		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC VDDCI voltage table", return -EINVAL);
-
- /* GPIO voltage control */
- if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->vdd_ci_control)
- table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
- else
- table->VddciLevel[count].Smio = 0;
- }
-
-	CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
-
- return 0;
-}
-
-/**
- * Mvdd table preparation for SMC.
- *
- * @param *hwmgr The address of the hardware manager.
- * @param *table The SMC DPM table structure to be populated.
- * @return 0
- */
-static int iceland_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- int result;
- uint32_t count;
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- table->MvddLevelCount = data->mvdd_voltage_table.count;
- for (count = 0; count < table->MvddLevelCount; count++) {
- result = iceland_populate_smc_voltage_table(hwmgr,
- &data->mvdd_voltage_table.entries[count],
- &table->MvddLevel[count]);
-		PP_ASSERT_WITH_CODE(0 == result, "Failed to populate SMC MVDD voltage table", return -EINVAL);
-
- /* GPIO voltage control */
- if (ICELAND_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
- table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
- else
- table->MvddLevel[count].Smio = 0;
- }
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
-
- return 0;
-}
-
-int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
-	uint8_t *hi_vid = data->power_tune_table.BapmVddCVidHiSidd;
-	uint8_t *lo_vid = data->power_tune_table.BapmVddCVidLoSidd;
-
- PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
- "The CAC Leakage table does not exist!", return -EINVAL);
- PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
- "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
- PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
- "CACLeakageTable->count and VddcDependencyOnSCLk->count not equal", return -EINVAL);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
- for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
- lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
- hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
- }
- } else {
- PP_ASSERT_WITH_CODE(false, "Iceland should always support EVV", return -EINVAL);
- }
-
- return 0;
-}
-
-int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
-{
- int i;
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- uint8_t *vid = data->power_tune_table.VddCVid;
-
- PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
- "There should never be more than 8 entries for VddcVid!!!",
- return -EINVAL);
-
- for (i = 0; i < (int)data->vddc_voltage_table.count; i++) {
- vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
- }
-
- return 0;
-}
-
-/**
- * Preparation of voltage tables for SMC.
- *
- * @param hwmgr the address of the hardware manager
- * @param table the SMC DPM table structure to be populated
- * @return   0 on success, -1 on failure
- */
-
-int iceland_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- int result;
-
- result = iceland_populate_smc_vddc_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate VDDC voltage table to SMC", return -1);
-
- result = iceland_populate_smc_vdd_ci_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate VDDCI voltage table to SMC", return -1);
-
- result = iceland_populate_smc_mvdd_table(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "can not populate MVDD voltage table to SMC", return -1);
-
- return 0;
-}
-
-
-/**
- * Re-generate the DPM level mask value
- * @param    dpm_table  the single DPM table to generate the enable mask from
- */
-static uint32_t iceland_get_dpm_level_enable_mask_value(
- struct iceland_single_dpm_table * dpm_table)
-{
- uint32_t i;
- uint32_t mask_value = 0;
-
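-	/*
-	 * Bit i of the returned mask corresponds to dpm_levels[i].
-	 * Illustrative example: with four levels where only level 2 is
-	 * disabled, the loop below yields 0x0B (binary 1011).
-	 */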
- for (i = dpm_table->count; i > 0; i--) {
- mask_value = mask_value << 1;
-
- if (dpm_table->dpm_levels[i-1].enabled)
- mask_value |= 0x1;
- else
- mask_value &= 0xFFFFFFFE;
- }
- return mask_value;
-}
-
-int iceland_populate_memory_timing_parameters(
- struct pp_hwmgr *hwmgr,
- uint32_t engine_clock,
- uint32_t memory_clock,
- struct SMU71_Discrete_MCArbDramTimingTableEntry *arb_regs
- )
-{
- uint32_t dramTiming;
- uint32_t dramTiming2;
- uint32_t burstTime;
- int result;
-
- result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
- engine_clock, memory_clock);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error calling VBIOS to set DRAM_TIMING.", return result);
-
- dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
- dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
- burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
-
- arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
- arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
- arb_regs->McArbBurstTime = (uint8_t)burstTime;
-
- return 0;
-}
-
-/**
- * Setup parameters for the MC ARB.
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return   0 on success, a non-zero error code otherwise
- *
- * This function is to be called from the SetPowerState table.
- */
-int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- int result = 0;
- SMU71_Discrete_MCArbDramTimingTable arb_regs;
- uint32_t i, j;
-
- memset(&arb_regs, 0x00, sizeof(SMU71_Discrete_MCArbDramTimingTable));
-
- for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
- result = iceland_populate_memory_timing_parameters
- (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
- data->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
-
- if (0 != result) {
- break;
- }
- }
- }
-
- if (0 == result) {
- result = iceland_copy_bytes_to_smc(
- hwmgr->smumgr,
- data->arb_table_start,
- (uint8_t *)&arb_regs,
- sizeof(SMU71_Discrete_MCArbDramTimingTable),
- data->sram_end
- );
- }
-
- return result;
-}
-
-static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_DpmTable *table)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- struct iceland_dpm_table *dpm_table = &data->dpm_table;
- uint32_t i;
-
- /* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
- for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount =
- (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity =
- 1;
- table->LinkLevel[i].SPC =
- (uint8_t)(data->pcie_spc_cap & 0xff);
- table->LinkLevel[i].DownThreshold =
- PP_HOST_TO_SMC_UL(5);
- table->LinkLevel[i].UpThreshold =
- PP_HOST_TO_SMC_UL(30);
- }
-
- data->smc_state_table.LinkLevelCount =
- (uint8_t)dpm_table->pcie_speed_table.count;
- data->dpm_level_enable_mask.pcie_dpm_enable_mask =
- iceland_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-
- return 0;
-}
-
-static int iceland_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- return 0;
-}
-
-uint8_t iceland_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
- uint32_t voltage)
-{
- uint8_t count = (uint8_t) (voltage_table->count);
- uint8_t i = 0;
-
- PP_ASSERT_WITH_CODE((NULL != voltage_table),
- "Voltage Table empty.", return 0;);
- PP_ASSERT_WITH_CODE((0 != count),
- "Voltage Table empty.", return 0;);
-
- for (i = 0; i < count; i++) {
-		/* find the first voltage greater than or equal to the requested one */
- if (voltage_table->entries[i].value >= voltage)
- return i;
- }
-
- /* voltage is bigger than max voltage in the table */
- return i - 1;
-}
-
-static int iceland_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- return 0;
-}
-
-static int iceland_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- return 0;
-}
-
-static int iceland_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- return 0;
-}
-
-
-static int iceland_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *tab)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
-	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
-		tab->SVI2Enable |= VDDC_ON_SVI2;
-
-	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_ci_control)
-		tab->SVI2Enable |= VDDCI_ON_SVI2;
-	else
-		tab->MergedVddci = 1;
-
-	if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control)
-		tab->SVI2Enable |= MVDD_ON_SVI2;
-
-	PP_ASSERT_WITH_CODE(tab->SVI2Enable != (VDDC_ON_SVI2 | VDDCI_ON_SVI2 | MVDD_ON_SVI2) &&
-		(tab->SVI2Enable & VDDC_ON_SVI2), "SVI2 domain configuration is incorrect!", return -EINVAL);
-
- return 0;
-}
-
-static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
- struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
- uint32_t clock, uint32_t *vol)
-{
- uint32_t i = 0;
-
-	/* the clock-voltage dependency table is empty */
- if (allowed_clock_voltage_table->count == 0)
- return -EINVAL;
-
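-	/*
-	 * Illustrative example (assuming the 10 kHz clock units used
-	 * elsewhere in this file): with entries {30000:900, 60000:1000,
-	 * 80000:1100} and clock == 50000, the first entry with
-	 * clk >= clock is 60000, so *vol becomes 1000; a request above
-	 * 80000 falls through to the last entry instead.
-	 */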
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
-		/* find the first sclk greater than or equal to the request */
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- *vol = allowed_clock_voltage_table->entries[i].v;
- return 0;
- }
- }
-
-	/* sclk is bigger than the max sclk in the dependency table */
- *vol = allowed_clock_voltage_table->entries[i - 1].v;
-
- return 0;
-}
-
-static uint8_t iceland_get_mclk_frequency_ratio(uint32_t memory_clock,
- bool strobe_mode)
-{
- uint8_t mc_para_index;
-
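-	/*
-	 * Illustrative example (memory_clock in 10 kHz units): in strobe
-	 * mode a 200 MHz clock (20000) maps to (20000 - 10000) / 2500 = 4;
-	 * out of strobe mode a 1 GHz clock (100000) maps to
-	 * (100000 - 60000) / 5000 = 8.
-	 */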
- if (strobe_mode) {
- if (memory_clock < 12500) {
- mc_para_index = 0x00;
- } else if (memory_clock > 47500) {
- mc_para_index = 0x0f;
- } else {
- mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
- }
- } else {
- if (memory_clock < 65000) {
- mc_para_index = 0x00;
- } else if (memory_clock > 135000) {
- mc_para_index = 0x0f;
- } else {
- mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
- }
- }
-
- return mc_para_index;
-}
-
-static uint8_t iceland_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
-{
- uint8_t mc_para_index;
-
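-	/*
-	 * Illustrative example (memory_clock in 10 kHz units): a 400 MHz
-	 * clock (40000) maps to (40000 - 10000) / 5000 + 1 = 7.
-	 */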
- if (memory_clock < 10000) {
- mc_para_index = 0;
- } else if (memory_clock >= 80000) {
- mc_para_index = 0x0f;
- } else {
- mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
- }
-
- return mc_para_index;
-}
-
-static int iceland_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
- uint32_t sclk, uint32_t *p_shed)
-{
- unsigned int i;
-
- /* use the minimum phase shedding */
- *p_shed = 1;
-
- /*
- * PPGen ensures the phase shedding limits table is sorted
- * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk.
- * VBIOS ensures the phase shedding masks table is sorted from
- * least phases enabled (phase shedding on) to most phases
- * enabled (phase shedding off).
- */
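-	/*
-	 * Illustrative example: with limit entries at Sclk = 30000, 60000
-	 * and 90000, a request of 50000 stops at index 1 (the first entry
-	 * whose Sclk is above the request), so *p_shed becomes 1.
-	 */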
- for (i = 0; i < pl->count; i++) {
- if (sclk < pl->entries[i].Sclk) {
- /* Enable phase shedding */
- *p_shed = i;
- break;
- }
- }
-
- return 0;
-}
-
-static int iceland_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
- uint32_t memory_clock, uint32_t *p_shed)
-{
- unsigned int i;
-
- /* use the minimum phase shedding */
- *p_shed = 1;
-
- /*
- * PPGen ensures the phase shedding limits table is sorted
- * from lowest voltage/sclk/mclk to highest voltage/sclk/mclk.
- * VBIOS ensures the phase shedding masks table is sorted from
- * least phases enabled (phase shedding on) to most phases
- * enabled (phase shedding off).
- */
- for (i = 0; i < pl->count; i++) {
- if (memory_clock < pl->entries[i].Mclk) {
- /* Enable phase shedding */
- *p_shed = i;
- break;
- }
- }
-
- return 0;
-}
-
-/**
- * Populates the SMC MCLK structure using the provided memory clock
- *
- * @param hwmgr the address of the hardware manager
- * @param memory_clock the memory clock to use to populate the structure
- * @param    mclk          the SMC memory level structure to be populated
- * @param    strobe_mode   whether strobe mode should be used for this level
- * @param    dllStateOn    whether the memory DLL should stay powered on
- */
-static int iceland_calculate_mclk_params(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU71_Discrete_MemoryLevel *mclk,
- bool strobe_mode,
- bool dllStateOn
- )
-{
- const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
- uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
- uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
- uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
- uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
- uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
- uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
- uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
-
- pp_atomctrl_memory_clock_param mpll_param;
- int result;
-
- result = atomctrl_get_memory_pll_dividers_si(hwmgr,
- memory_clock, &mpll_param, strobe_mode);
- PP_ASSERT_WITH_CODE(0 == result,
- "Error retrieving Memory Clock Parameters from VBIOS.", return result);
-
- /* MPLL_FUNC_CNTL setup*/
- mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
-
- /* MPLL_FUNC_CNTL_1 setup*/
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
- mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
- MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
-
- /* MPLL_AD_FUNC_CNTL setup*/
- mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
- MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
-
- if (data->is_memory_GDDR5) {
- /* MPLL_DQ_FUNC_CNTL setup*/
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
- mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
- MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
- /*
- ************************************
- Fref = Reference Frequency
- NF = Feedback divider ratio
- NR = Reference divider ratio
- Fnom = Nominal VCO output frequency = Fref * NF / NR
- Fs = Spreading Rate
- D = Percentage down-spread / 2
- Fint = Reference input frequency to PFD = Fref / NR
- NS = Spreading rate divider ratio = int(Fint / (2 * Fs))
- CLKS = NS - 1 = ISS_STEP_NUM[11:0]
- NV = D * Fs / Fnom * 4 * ((Fnom/Fref * NR) ^ 2)
- CLKV = 65536 * NV = ISS_STEP_SIZE[25:0]
- *************************************
- */
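-		/*
-		 * In the code below, freq_nom corresponds to Fnom, tmp to
-		 * (Fnom / Fref)^2, and the resulting clks and clkv values are
-		 * written into MPLL_SS2.CLKS and MPLL_SS1.CLKV respectively.
-		 */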
- pp_atomctrl_internal_ss_info ss_info;
- uint32_t freq_nom;
- uint32_t tmp;
- uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
-
- /* for GDDR5 for all modes and DDR3 */
- if (1 == mpll_param.qdr)
- freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
- else
- freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
-
- /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
- tmp = (freq_nom / reference_clock);
- tmp = tmp * tmp;
-
- if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
- /* ss_info.speed_spectrum_percentage -- in unit of 0.01% */
-			/* ss_info.speed_spectrum_rate -- in unit of khz */
- /* CLKS = reference_clock / (2 * speed_spectrum_rate * reference_divider) * 10 */
- /* = reference_clock * 5 / speed_spectrum_rate */
- uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
-
- /* CLKV = 65536 * speed_spectrum_percentage / 2 * spreadSpecrumRate / freq_nom * 4 / 100000 * ((freq_nom / reference_clock) ^ 2) */
- /* = 131 * speed_spectrum_percentage * speed_spectrum_rate / 100 * ((freq_nom / reference_clock) ^ 2) / freq_nom */
- uint32_t clkv =
- (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
- ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
- mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
- }
- }
-
- /* MCLK_PWRMGT_CNTL setup */
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
-
-
-	/* Save the result data to the output memory level structure */
- mclk->MclkFrequency = memory_clock;
- mclk->MpllFuncCntl = mpll_func_cntl;
- mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
- mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
- mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
- mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
- mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
- mclk->DllCntl = dll_cntl;
- mclk->MpllSs1 = mpll_ss1;
- mclk->MpllSs2 = mpll_ss2;
-
- return 0;
-}
-
-static int iceland_populate_single_memory_level(
- struct pp_hwmgr *hwmgr,
- uint32_t memory_clock,
- SMU71_Discrete_MemoryLevel *memory_level
- )
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- int result = 0;
- bool dllStateOn;
- struct cgs_display_info info = {0};
-
-
- if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
- hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
- }
-
- if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE) {
- memory_level->MinVddci = memory_level->MinVddc;
- } else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
- hwmgr->dyn_state.vddci_dependency_on_mclk,
- memory_clock,
- &memory_level->MinVddci);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
- }
-
- if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
- hwmgr->dyn_state.mvdd_dependency_on_mclk, memory_clock, &memory_level->MinMvdd);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find MinMVDD voltage value from memory MVDD voltage dependency table", return result);
- }
-
- memory_level->MinVddcPhases = 1;
-
- if (data->vddc_phase_shed_control) {
- iceland_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
- memory_clock, &memory_level->MinVddcPhases);
- }
-
- memory_level->EnabledForThrottle = 1;
- memory_level->EnabledForActivity = 1;
- memory_level->UpHyst = 0;
- memory_level->DownHyst = 100;
- memory_level->VoltageDownHyst = 0;
-
- /* Indicates maximum activity level for this performance level.*/
- memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
- memory_level->StutterEnable = 0;
- memory_level->StrobeEnable = 0;
- memory_level->EdcReadEnable = 0;
- memory_level->EdcWriteEnable = 0;
- memory_level->RttEnable = 0;
-
- /* default set to low watermark. Highest level will be set to high later.*/
- memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
- data->display_timing.num_existing_displays = info.display_count;
-
- //if ((data->mclk_stutter_mode_threshold != 0) &&
- // (memory_clock <= data->mclk_stutter_mode_threshold) &&
- // (data->is_uvd_enabled == 0)
- // && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
- // && (data->display_timing.num_existing_displays <= 2)
- // && (data->display_timing.num_existing_displays != 0))
- // memory_level->StutterEnable = 1;
-
- /* decide strobe mode*/
- memory_level->StrobeEnable = (data->mclk_strobe_mode_threshold != 0) &&
- (memory_clock <= data->mclk_strobe_mode_threshold);
-
- /* decide EDC mode and memory clock ratio*/
- if (data->is_memory_GDDR5) {
- memory_level->StrobeRatio = iceland_get_mclk_frequency_ratio(memory_clock,
- memory_level->StrobeEnable);
-
- if ((data->mclk_edc_enable_threshold != 0) &&
- (memory_clock > data->mclk_edc_enable_threshold)) {
- memory_level->EdcReadEnable = 1;
- }
-
- if ((data->mclk_edc_wr_enable_threshold != 0) &&
- (memory_clock > data->mclk_edc_wr_enable_threshold)) {
- memory_level->EdcWriteEnable = 1;
- }
-
- if (memory_level->StrobeEnable) {
- if (iceland_get_mclk_frequency_ratio(memory_clock, 1) >=
- ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf)) {
- dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- } else {
- dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
- }
-
- } else {
- dllStateOn = data->dll_defaule_on;
- }
- } else {
- memory_level->StrobeRatio =
- iceland_get_ddr3_mclk_frequency_ratio(memory_clock);
- dllStateOn = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
- }
-
- result = iceland_calculate_mclk_params(hwmgr,
- memory_clock, memory_level, memory_level->StrobeEnable, dllStateOn);
-
- if (0 == result) {
- memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
- memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
- memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
- /* MCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
- /* Indicates maximum activity level for this performance level.*/
- CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
- CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
- }
-
- return result;
-}
-
-/**
- * Populates the SMC MVDD structure using the provided memory clock.
- *
- * @param hwmgr the address of the hardware manager
- * @param    mclk   the MCLK value used to decide whether MVDD should be high or low
- * @param voltage the SMC VOLTAGE structure to be populated
- */
-int iceland_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk, SMU71_Discrete_VoltageLevel *voltage)
-{
- const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- uint32_t i = 0;
-
- if (ICELAND_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
-		/* find the first mvdd entry whose clock is greater than or equal to the request */
- for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
- if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
- /* Always round to higher voltage. */
- voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
-
- PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
- "MVDD Voltage is outside the supported range.", return -1);
-
- } else {
- return -1;
- }
-
- return 0;
-}
-
-
-static int iceland_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_DpmTable *table)
-{
- int result = 0;
- const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- SMU71_Discrete_VoltageLevel voltage_level;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
- uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
- uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
-
- /* The ACPI state should not do DPM on DC (or ever).*/
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (data->acpi_vddc)
- table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
- else
- table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pp_table * VOLTAGE_SCALE);
-
- table->ACPILevel.MinVddcPhases = (data->vddc_phase_shed_control) ? 0 : 1;
-
-	/* use the reference clock as the ACPI level SCLK frequency */
- table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
- table->ACPILevel.SclkFrequency, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* divider ID for required SCLK*/
- table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
- spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
- CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
-
- /* For various features to be enabled/disabled while this level is active.*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
- /* SCLK frequency in units of 10KHz*/
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
-
- table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
- table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
-
- /* CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage);*/
-
- if (0 == iceland_populate_mvdd_value(hwmgr, 0, &voltage_level))
- table->MemoryACPILevel.MinMvdd =
- PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
- else
- table->MemoryACPILevel.MinMvdd = 0;
-
- /* Force reset on DLL*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
-
- /* Disable DLL in ACPIState*/
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
- mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
- MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
-
- /* Enable DLL bypass signal*/
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK0_BYPASS, 0);
- dll_cntl = PHM_SET_FIELD(dll_cntl,
- DLL_CNTL, MRDCK1_BYPASS, 0);
-
- table->MemoryACPILevel.DllCntl =
- PP_HOST_TO_SMC_UL(dll_cntl);
- table->MemoryACPILevel.MclkPwrmgtCntl =
- PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
- table->MemoryACPILevel.MpllAdFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
- table->MemoryACPILevel.MpllDqFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
- table->MemoryACPILevel.MpllFuncCntl_1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
- table->MemoryACPILevel.MpllFuncCntl_2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
- table->MemoryACPILevel.MpllSs1 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
- table->MemoryACPILevel.MpllSs2 =
- PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpHyst = 0;
- table->MemoryACPILevel.DownHyst = 100;
- table->MemoryACPILevel.VoltageDownHyst = 0;
- /* Indicates maximum activity level for this performance level.*/
- table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = 0;
- table->MemoryACPILevel.StrobeEnable = 0;
- table->MemoryACPILevel.EdcReadEnable = 0;
- table->MemoryACPILevel.EdcWriteEnable = 0;
- table->MemoryACPILevel.RttEnable = 0;
-
- return result;
-}
-
-static int iceland_find_boot_level(struct iceland_single_dpm_table *table, uint32_t value, uint32_t *boot_level)
-{
-	int result = -EINVAL;
- uint32_t i;
-
- for (i = 0; i < table->count; i++) {
- if (value == table->dpm_levels[i].value) {
- *boot_level = i;
- result = 0;
- }
- }
- return result;
-}
-
-/**
- * Calculates the SCLK dividers using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param sclk the SMC SCLK structure to be populated
- */
-int iceland_calculate_sclk_params(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, SMU71_Discrete_GraphicsLevel *sclk)
-{
- const iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- pp_atomctrl_clock_dividers_vi dividers;
- uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
- uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
- uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
- uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t reference_clock;
- uint32_t reference_divider;
- uint32_t fbdiv;
- int result;
-
- /* get the engine clock dividers for this clock value*/
- result = atomctrl_get_engine_pll_dividers_vi(hwmgr, engine_clock, &dividers);
-
- PP_ASSERT_WITH_CODE(result == 0,
- "Error retrieving Engine Clock dividers from VBIOS.", return result);
-
- /* To get FBDIV we need to multiply this by 16384 and divide it by Fref.*/
- reference_clock = atomctrl_get_reference_clock(hwmgr);
-
- reference_divider = 1 + dividers.uc_pll_ref_div;
-
-	/* the low 14 bits are the fraction and the high 12 bits are the divider */
- fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
-
- /* SPLL_FUNC_CNTL setup*/
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_REF_DIV, dividers.uc_pll_ref_div);
- spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
- CG_SPLL_FUNC_CNTL, SPLL_PDIV_A, dividers.uc_pll_post_div);
-
- /* SPLL_FUNC_CNTL_3 setup*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_FB_DIV, fbdiv);
-
- /* set to use fractional accumulation*/
- spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3,
- CG_SPLL_FUNC_CNTL_3, SPLL_DITHEN, 1);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
- pp_atomctrl_internal_ss_info ss_info;
-
- uint32_t vcoFreq = engine_clock * dividers.uc_pll_post_div;
- if (0 == atomctrl_get_engine_clock_spread_spectrum(hwmgr, vcoFreq, &ss_info)) {
- /*
- * ss_info.speed_spectrum_percentage -- in unit of 0.01%
- * ss_info.speed_spectrum_rate -- in unit of khz
- */
- /* clks = reference_clock * 10 / (REFDIV + 1) / speed_spectrum_rate / 2 */
- uint32_t clkS = reference_clock * 5 / (reference_divider * ss_info.speed_spectrum_rate);
-
- /* clkv = 2 * D * fbdiv / NS */
- uint32_t clkV = 4 * ss_info.speed_spectrum_percentage * fbdiv / (clkS * 10000);
-
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, CLKS, clkS);
- cg_spll_spread_spectrum =
- PHM_SET_FIELD(cg_spll_spread_spectrum, CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
- cg_spll_spread_spectrum_2 =
- PHM_SET_FIELD(cg_spll_spread_spectrum_2, CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clkV);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
-
- return 0;
-}
-
-static uint8_t iceland_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, uint32_t min_engine_clock_in_sr)
-{
- uint32_t i, temp;
- uint32_t min = (min_engine_clock_in_sr > ICELAND_MINIMUM_ENGINE_CLOCK) ?
- min_engine_clock_in_sr : ICELAND_MINIMUM_ENGINE_CLOCK;
-
- PP_ASSERT_WITH_CODE((engine_clock >= min),
- "Engine clock can't satisfy stutter requirement!", return 0);
-
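-	/*
-	 * The loop below picks the largest divider ID i for which
-	 * engine_clock / (1 << i) still meets the minimum. Illustrative
-	 * example, assuming min works out to 2500 (25 MHz in 10 kHz units)
-	 * and the maximum divider ID is at least 4: a 600 MHz engine clock
-	 * (60000) yields i = 4, because 60000 / 16 = 3750 >= 2500 while
-	 * 60000 / 32 = 1875 is already below the minimum.
-	 */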
- for (i = ICELAND_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
- temp = engine_clock / (1 << i);
-
-		if (temp >= min || i == 0)
- break;
- }
- return (uint8_t)i;
-}
-
-/**
- * Populates single SMC SCLK structure using the provided engine clock
- *
- * @param hwmgr the address of the hardware manager
- * @param engine_clock the engine clock to use to populate the structure
- * @param    sclk_activity_level_threshold  the activity level threshold for this level
- * @param    graphic_level                  the SMC graphics level structure to be populated
- */
-static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
- uint32_t engine_clock, uint16_t sclk_activity_level_threshold,
- SMU71_Discrete_GraphicsLevel *graphic_level)
-{
- int result;
- uint32_t threshold;
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
-
-
- /* populate graphics levels*/
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
- hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock, &graphic_level->MinVddc);
- PP_ASSERT_WITH_CODE((0 == result),
- "can not find VDDC voltage value for VDDC engine clock dependency table", return result);
-
- /* SCLK frequency in units of 10KHz*/
- graphic_level->SclkFrequency = engine_clock;
-
- /*
- * Minimum VDDC phases required to support this level, it
- * should get from dependence table.
- */
- graphic_level->MinVddcPhases = 1;
-
- if (data->vddc_phase_shed_control) {
- iceland_populate_phase_value_based_on_sclk(hwmgr,
- hwmgr->dyn_state.vddc_phase_shed_limits_table,
- engine_clock,
- &graphic_level->MinVddcPhases);
- }
-
- /* Indicates maximum activity level for this performance level. 50% for now*/
- graphic_level->ActivityLevel = sclk_activity_level_threshold;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- /* this level can be used if activity is high enough.*/
- graphic_level->EnabledForActivity = 1;
- /* this level can be used for throttling.*/
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpHyst = 0;
- graphic_level->DownHyst = 100;
- graphic_level->VoltageDownHyst = 0;
- graphic_level->PowerThrottle = 0;
-
- threshold = engine_clock * data->fast_watermark_threshold / 100;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- graphic_level->DeepSleepDivId =
- iceland_get_sleep_divider_id_from_clock(hwmgr, engine_clock,
- data->display_timing.min_clock_insr);
- }
-
-	/* Default to slow; the highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_HIGH later. */
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (0 == result) {
- graphic_level->MinVddc = PP_HOST_TO_SMC_UL(graphic_level->MinVddc * VOLTAGE_SCALE);
- /* CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVoltage);*/
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->MinVddcPhases);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SclkFrequency);
- CONVERT_FROM_HOST_TO_SMC_US(graphic_level->ActivityLevel);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl3);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CgSpllFuncCntl4);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->SpllSpreadSpectrum2);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(graphic_level->CcPwrDynRm1);
- }
-
- return result;
-}
-
-/**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-static int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- struct iceland_dpm_table *dpm_table = &data->dpm_table;
- int result = 0;
- uint32_t level_array_adress = data->dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
-
- uint32_t level_array_size = sizeof(SMU71_Discrete_GraphicsLevel) * SMU71_MAX_LEVELS_GRAPHICS;
- SMU71_Discrete_GraphicsLevel *levels = data->smc_state_table.GraphicsLevel;
- uint32_t i;
- uint8_t highest_pcie_level_enabled = 0, lowest_pcie_level_enabled = 0, mid_pcie_level_enabled = 0, count = 0;
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- result = iceland_populate_single_graphic_level(hwmgr,
- dpm_table->sclk_table.dpm_levels[i].value,
- (uint16_t)data->activity_target[i],
- &(data->smc_state_table.GraphicsLevel[i]));
- if (0 != result)
- return result;
-
- /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */
- if (i > 1)
- data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
- }
-
- /* set highest level watermark to high */
- if (dpm_table->sclk_table.count > 1)
- data->smc_state_table.GraphicsLevel[dpm_table->sclk_table.count-1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- data->smc_state_table.GraphicsDpmLevelCount =
- (uint8_t)dpm_table->sclk_table.count;
- data->dpm_level_enable_mask.sclk_dpm_enable_mask =
- iceland_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
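-	/*
-	 * The three loops below derive the highest, lowest and a middle
-	 * enabled PCIe DPM level from pcie_dpm_enable_mask, where bit n
-	 * corresponds to PCIe level n. Illustrative example: for a mask of
-	 * 0x06 (levels 1 and 2 enabled) they yield highest = 2, lowest = 1
-	 * and mid = 2.
-	 */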
- while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (highest_pcie_level_enabled + 1))) != 0) {
- highest_pcie_level_enabled++;
- }
-
- while ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << lowest_pcie_level_enabled)) == 0) {
- lowest_pcie_level_enabled++;
- }
-
- while ((count < highest_pcie_level_enabled) &&
- ((data->dpm_level_enable_mask.pcie_dpm_enable_mask &
- (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) {
- count++;
- }
-
- mid_pcie_level_enabled = (lowest_pcie_level_enabled+1+count) < highest_pcie_level_enabled ?
- (lowest_pcie_level_enabled + 1 + count) : highest_pcie_level_enabled;
-
- /* set pcieDpmLevel to highest_pcie_level_enabled*/
- for (i = 2; i < dpm_table->sclk_table.count; i++) {
- data->smc_state_table.GraphicsLevel[i].pcieDpmLevel = highest_pcie_level_enabled;
- }
-
- /* set pcieDpmLevel to lowest_pcie_level_enabled*/
- data->smc_state_table.GraphicsLevel[0].pcieDpmLevel = lowest_pcie_level_enabled;
-
- /* set pcieDpmLevel to mid_pcie_level_enabled*/
- data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
-
-	/* the level count is sent to the SMC only once, at SMC table init, and never changes */
- result = iceland_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
- if (0 != result)
- return result;
-
- return 0;
-}
-
-/**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states
- *
- * @param hwmgr the address of the hardware manager
- */
-
-static int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- struct iceland_dpm_table *dpm_table = &data->dpm_table;
- int result;
- /* populate MCLK dpm table to SMU7 */
- uint32_t level_array_adress = data->dpm_table_start + offsetof(SMU71_Discrete_DpmTable, MemoryLevel);
- uint32_t level_array_size = sizeof(SMU71_Discrete_MemoryLevel) * SMU71_MAX_LEVELS_MEMORY;
- SMU71_Discrete_MemoryLevel *levels = data->smc_state_table.MemoryLevel;
- uint32_t i;
-
- memset(levels, 0x00, level_array_size);
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
- "can not populate memory level as memory clock is zero", return -1);
- result = iceland_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
- &(data->smc_state_table.MemoryLevel[i]));
- if (0 != result) {
- return result;
- }
- }
-
- /* Only enable level 0 for now.*/
- data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
-
-	/*
-	 * In order to prevent MC activity in stutter mode from pushing DPM up,
-	 * the UVD change complements this by putting the MCLK in a higher state
-	 * by default, so that we are not affected by the up threshold or MCLK DPM latency.
-	 */
- data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
- CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.MemoryLevel[0].ActivityLevel);
-
- data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
- data->dpm_level_enable_mask.mclk_dpm_enable_mask = iceland_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
- /* set highest level watermark to high*/
- data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
-
-	/* the level count is sent to the SMC only once, at SMC table init, and never changes */
- result = iceland_copy_bytes_to_smc(hwmgr->smumgr,
- level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size, data->sram_end);
-
- if (0 != result) {
- return result;
- }
-
- return 0;
-}
-
-struct ICELAND_DLL_SPEED_SETTING {
- uint16_t Min; /* Minimum Data Rate*/
- uint16_t Max; /* Maximum Data Rate*/
- uint32_t dll_speed; /* The desired DLL_SPEED setting*/
-};
-
-static int iceland_populate_ulv_level(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *pstate)
-{
- int result = 0;
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- uint32_t voltage_response_time, ulv_voltage;
-
- pstate->CcPwrDynRm = 0;
- pstate->CcPwrDynRm1 = 0;
-
-	/* The backbias response time is used for the ULV state voltage value. */
- result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
- PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
-
-	if (!ulv_voltage) {
- data->ulv.ulv_supported = false;
- return 0;
- }
-
- if (ICELAND_VOLTAGE_CONTROL_BY_SVID2 != data->voltage_control) {
- /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
- if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
- pstate->VddcOffset = 0;
-		} else {
- /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
- pstate->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
- }
- } else {
- /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
-		if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v) {
- pstate->VddcOffsetVid = 0;
- } else {
- /* used in SVI2 Mode */
- pstate->VddcOffsetVid = (uint8_t)((hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage) * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
- }
- }
-
- /* used in SVI2 Mode to shed phase */
- pstate->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
-
- if (0 == result) {
- CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm);
- CONVERT_FROM_HOST_TO_SMC_UL(pstate->CcPwrDynRm1);
- CONVERT_FROM_HOST_TO_SMC_US(pstate->VddcOffset);
- }
-
- return result;
-}
-
-static int iceland_populate_ulv_state(struct pp_hwmgr *hwmgr, SMU71_Discrete_Ulv *ulv)
-{
- return iceland_populate_ulv_level(hwmgr, ulv);
-}
-
-static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- uint8_t count, level;
-
- count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
-
- for (level = 0; level < count; level++) {
- if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
- >= data->vbios_boot_state.sclk_bootup_value) {
- data->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
-
- for (level = 0; level < count; level++) {
- if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
- >= data->vbios_boot_state.mclk_bootup_value) {
- data->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-
- return 0;
-}
-
-/**
- * Initializes the SMC table and uploads it
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return   0 on success, a non-zero error code otherwise
- */
-static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- SMU71_Discrete_DpmTable *table = &(data->smc_state_table);
- const struct phw_iceland_ulv_parm *ulv = &(data->ulv);
-
- result = iceland_setup_default_dpm_tables(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to setup default DPM tables!", return result;);
- memset(&(data->smc_state_table), 0x00, sizeof(data->smc_state_table));
-
- if (ICELAND_VOLTAGE_CONTROL_NONE != data->voltage_control) {
- iceland_populate_smc_voltage_tables(hwmgr, table);
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition)) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StepVddc)) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
- }
-
- if (data->is_memory_GDDR5) {
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
- }
-
- if (ulv->ulv_supported) {
- result = iceland_populate_ulv_state(hwmgr, &data->ulv_setting);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ULV state!", return result;);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_ULV_PARAMETER, ulv->ch_ulv_parameter);
- }
-
- result = iceland_populate_smc_link_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Link Level!", return result;);
-
- result = iceland_populate_all_graphic_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Graphics Level!", return result;);
-
- result = iceland_populate_all_memory_levels(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize Memory Level!", return result;);
-
- result = iceland_populate_smc_acpi_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACPI Level!", return result;);
-
- result = iceland_populate_smc_vce_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize VCE Level!", return result;);
-
- result = iceland_populate_smc_acp_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize ACP Level!", return result;);
-
- result = iceland_populate_smc_samu_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize SAMU Level!", return result;);
-
- /*
- * Since only the initial state is completely set up at this
- * point (the other states are just copies of the boot state)
- * we only need to populate the ARB settings for the initial
- * state.
- */
- result = iceland_program_memory_timing_parameters(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to Write ARB settings for the initial state.", return result;);
-
- result = iceland_populate_smc_uvd_level(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize UVD Level!", return result;);
-
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- /* find boot level from dpm table */
- result = iceland_find_boot_level(&(data->dpm_table.sclk_table),
- data->vbios_boot_state.sclk_bootup_value,
- (uint32_t *)&(data->smc_state_table.GraphicsBootLevel));
-
- if (result)
-		pr_warning("Boot engine clock value from VBIOS was not found in the SCLK DPM table.\n");
-
- result = iceland_find_boot_level(&(data->dpm_table.mclk_table),
- data->vbios_boot_state.mclk_bootup_value,
- (uint32_t *)&(data->smc_state_table.MemoryBootLevel));
-
- if (result)
-		pr_warning("Boot memory clock value from VBIOS was not found in the MCLK DPM table.\n");
-
- table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
- if (ICELAND_VOLTAGE_CONTROL_NONE == data->vdd_ci_control) {
- table->BootVddci = table->BootVddc;
-	} else {
- table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
- }
- table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
-
- result = iceland_populate_smc_initial_state(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
-
- result = iceland_populate_bapm_parameters_in_dpm_table(hwmgr);
- PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
-
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh =
- (data->thermal_temp_setting.temperature_high *
- ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- table->TemperatureLimitLow =
- (data->thermal_temp_setting.temperature_low *
- ICELAND_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
- table->PCIeGenInterval = 1;
-
- result = iceland_populate_smc_svi2_config(hwmgr, table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to populate SVI2 setting!", return result);
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
- CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
- CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
- CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
- CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
-
- table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
- table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
- table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
-
- /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = iceland_copy_bytes_to_smc(hwmgr->smumgr, data->dpm_table_start +
- offsetof(SMU71_Discrete_DpmTable, SystemFlags),
- (uint8_t *)&(table->SystemFlags),
- sizeof(SMU71_Discrete_DpmTable) - 3 * sizeof(SMU71_PIDController),
- data->sram_end);
-
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to upload dpm data to SMC memory!", return result);
-
- /* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
- result = iceland_copy_bytes_to_smc(hwmgr->smumgr,
- data->ulv_settings_start,
- (uint8_t *)&(data->ulv_setting),
- sizeof(SMU71_Discrete_Ulv),
- data->sram_end);
-
-#if 0
- /* Notify SMC to follow new GPIO scheme */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition)) {
- if (0 == iceland_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_UseNewGPIOScheme))
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
- }
-#endif
-
- return result;
-}
-
-int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr, SMU71_Discrete_MCRegisters *mc_reg_table)
-{
- const struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
-
- uint32_t i, j;
-
- for (i = 0, j = 0; j < data->iceland_mc_reg_table.last; j++) {
- if (data->iceland_mc_reg_table.validflag & 1<<j) {
- PP_ASSERT_WITH_CODE(i < SMU71_DISCRETE_MC_REGISTER_ARRAY_SIZE,
- "Index of mc_reg_table->address[] array out of boundary", return -1);
- mc_reg_table->address[i].s0 =
- PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 =
- PP_HOST_TO_SMC_US(data->iceland_mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
-
- mc_reg_table->last = (uint8_t)i;
-
- return 0;
-}
-
-/* convert register values from driver to SMC format */
-void iceland_convert_mc_registers(
- const phw_iceland_mc_reg_entry * pEntry,
- SMU71_Discrete_MCRegisterSet *pData,
- uint32_t numEntries, uint32_t validflag)
-{
- uint32_t i, j;
-
- for (i = 0, j = 0; j < numEntries; j++) {
- if (validflag & 1<<j) {
- pData->value[i] = PP_HOST_TO_SMC_UL(pEntry->mc_data[j]);
- i++;
- }
- }
-}
-
-/* find the entry in the memory range table, then populate the value to SMC's iceland_mc_reg_table */
-int iceland_convert_mc_reg_table_entry_to_smc(
- struct pp_hwmgr *hwmgr,
- const uint32_t memory_clock,
- SMU71_Discrete_MCRegisterSet *mc_reg_table_data
- )
-{
- const iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- uint32_t i = 0;
-
- for (i = 0; i < data->iceland_mc_reg_table.num_entries; i++) {
- if (memory_clock <=
- data->iceland_mc_reg_table.mc_reg_table_entry[i].mclk_max) {
- break;
- }
- }
-
- if ((i == data->iceland_mc_reg_table.num_entries) && (i > 0))
- --i;
-
- iceland_convert_mc_registers(&data->iceland_mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, data->iceland_mc_reg_table.last, data->iceland_mc_reg_table.validflag);
-
- return 0;
-}
-
-int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
- SMU71_Discrete_MCRegisters *mc_reg_table)
-{
- int result = 0;
- iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- int res;
- uint32_t i;
-
- for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
- res = iceland_convert_mc_reg_table_entry_to_smc(
- hwmgr,
- data->dpm_table.mclk_table.dpm_levels[i].value,
- &mc_reg_table->data[i]
- );
-
- if (0 != res)
- result = res;
- }
-
- return result;
-}
-
-int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
-
- memset(&data->mc_reg_table, 0x00, sizeof(SMU71_Discrete_MCRegisters));
- result = iceland_populate_mc_reg_address(hwmgr, &(data->mc_reg_table));
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize MCRegTable for the MC register addresses!", return result;);
-
- result = iceland_convert_mc_reg_table_to_smc(hwmgr, &data->mc_reg_table);
- PP_ASSERT_WITH_CODE(0 == result,
- "Failed to initialize MCRegTable for driver state!", return result;);
-
- return iceland_copy_bytes_to_smc(hwmgr->smumgr, data->mc_reg_table_start,
- (uint8_t *)&data->mc_reg_table, sizeof(SMU71_Discrete_MCRegisters), data->sram_end);
-}
-
-int iceland_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
-{
-	PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
-
- return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
-}
-
-int iceland_enable_sclk_control(struct pp_hwmgr *hwmgr)
-{
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, 0);
-
- return 0;
-}
-
-int iceland_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- /* enable SCLK dpm */
- if (0 == data->sclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_DPM_Enable)),
- "Failed to enable SCLK DPM during DPM Start Function!",
- return -1);
- }
-
- /* enable MCLK dpm */
- if (0 == data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_Enable)),
- "Failed to enable MCLK DPM during DPM Start Function!",
- return -1);
-
- PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, 0x05);/* CH0,1 read */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, 0x05);/* CH2,3 read */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, 0x100005);/*Read */
-
- udelay(10);
-
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC0_CNTL, 0x400005);/* CH0,1 write */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_MC1_CNTL, 0x400005);/* CH2,3 write */
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixLCAC_CPL_CNTL, 0x500005);/* write */
-
- }
-
- return 0;
-}
-
-int iceland_start_dpm(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- /* enable general power management */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, 1);
- /* enable sclk deep sleep */
- PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, DYNAMIC_PM_EN, 1);
-
- /* prepare for PCIE DPM */
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SOFT_REGISTERS_TABLE_12, VoltageChangeTimeout, 0x1000);
-
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE, SWRST_COMMAND_1, RESETLC, 0x0);
-
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_Voltage_Cntl_Enable)),
- "Failed to enable voltage DPM during DPM Start Function!",
- return -1);
-
- if (0 != iceland_enable_sclk_mclk_dpm(hwmgr)) {
- PP_ASSERT_WITH_CODE(0, "Failed to enable Sclk DPM and Mclk DPM!", return -1);
- }
-
- /* enable PCIE dpm */
- if (0 == data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_Enable)),
- "Failed to enable pcie DPM during DPM Start Function!",
- return -1
- );
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition)) {
- smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_EnableACDCGPIOInterrupt);
- }
-
- return 0;
-}
-
-static void iceland_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
- uint32_t sources)
-{
- bool protection;
- enum DPM_EVENT_SRC src;
-
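-	/* Map the active auto-throttle source mask to the SMC thermal event source and decide whether thermal protection should be armed. */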
- switch (sources) {
- default:
- printk(KERN_ERR "Unknown throttling event sources.");
- /* fall through */
- case 0:
- protection = false;
- /* src is unused */
- break;
- case (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << PHM_AutoThrottleSource_External):
- protection = true;
- src = DPM_EVENT_SRC_EXTERNAL;
- break;
- case (1 << PHM_AutoThrottleSource_External) |
- (1 << PHM_AutoThrottleSource_Thermal):
- protection = true;
- src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
- break;
- }
- /* Order matters - don't enable thermal protection for the wrong source. */
- if (protection) {
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
- DPM_EVENT_SRC, src);
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS,
- !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController));
- } else
- PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
- THERMAL_PROTECTION_DIS, 1);
-}
-
-static int iceland_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
- PHM_AutoThrottleSource source)
-{
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
-
- if (!(data->active_auto_throttle_sources & (1 << source))) {
- data->active_auto_throttle_sources |= 1 << source;
- iceland_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
- }
- return 0;
-}
-
-static int iceland_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
-{
- return iceland_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
-}
-
-static int iceland_tf_start_smc(struct pp_hwmgr *hwmgr)
-{
- int ret = 0;
-
- if (!iceland_is_smc_ram_running(hwmgr->smumgr))
- ret = iceland_smu_start_smc(hwmgr->smumgr);
-
- return ret;
-}
-
-/**
-* Enables or disables the SMC master Deep Sleep switch
-*
-* Turns the switch on when SclkDeepSleep is supported by the platform,
-* and off otherwise.
-*
-* @param hwmgr the address of the powerplay hardware manager.
-* @return 0 on success, -EINVAL if the SMC message fails.
-*/
-static int iceland_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
-{
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_ON) != 0)
- PP_ASSERT_WITH_CODE(false,
- "Attempt to enable Master Deep Sleep switch failed!",
- return -EINVAL);
- } else {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF) != 0)
- PP_ASSERT_WITH_CODE(false,
- "Attempt to disable Master Deep Sleep switch failed!",
- return -EINVAL);
- }
-
- return 0;
-}
-
-static int iceland_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
-{
- int tmp_result, result = 0;
-
- if (cf_iceland_voltage_control(hwmgr)) {
- tmp_result = iceland_enable_voltage_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable voltage control!", return tmp_result);
-
- tmp_result = iceland_construct_voltage_tables(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to contruct voltage tables!", return tmp_result);
- }
-
- tmp_result = iceland_initialize_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize MC reg table!", return tmp_result);
-
- tmp_result = iceland_program_static_screen_threshold_parameters(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program static screen threshold parameters!", return tmp_result);
-
- tmp_result = iceland_enable_display_gap(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable display gap!", return tmp_result);
-
- tmp_result = iceland_program_voting_clients(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to program voting clients!", return tmp_result);
-
- tmp_result = iceland_upload_firmware(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to upload firmware header!", return tmp_result);
-
- tmp_result = iceland_process_firmware_header(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to process firmware header!", return tmp_result);
-
- tmp_result = iceland_initial_switch_from_arb_f0_to_f1(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize switch from ArbF0 to F1!", return tmp_result);
-
- tmp_result = iceland_init_smc_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to initialize SMC table!", return tmp_result);
-
- tmp_result = iceland_populate_initial_mc_reg_table(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate initialize MC Reg table!", return tmp_result);
-
- tmp_result = iceland_populate_pm_fuses(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to populate PM fuses!", return tmp_result);
-
- /* start SMC */
- tmp_result = iceland_tf_start_smc(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to start SMC!", return tmp_result);
-
- /* enable SCLK control */
- tmp_result = iceland_enable_sclk_control(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SCLK control!", return tmp_result);
-
- tmp_result = iceland_enable_deep_sleep_master_switch(hwmgr);
- PP_ASSERT_WITH_CODE((tmp_result == 0),
- "Failed to enable deep sleep!", return tmp_result);
-
- /* enable DPM */
- tmp_result = iceland_start_dpm(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to start DPM!", return tmp_result);
-
- tmp_result = iceland_enable_smc_cac(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable SMC CAC!", return tmp_result);
-
- tmp_result = iceland_enable_power_containment(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable power containment!", return tmp_result);
-
- tmp_result = iceland_power_control_set_level(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to power control set level!", result = tmp_result);
-
- tmp_result = iceland_enable_thermal_auto_throttle(hwmgr);
- PP_ASSERT_WITH_CODE((0 == tmp_result),
- "Failed to enable thermal auto throttle!", result = tmp_result);
-
- return result;
-}
-
-static int iceland_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
-{
- return phm_hwmgr_backend_fini(hwmgr);
-}
-
-static void iceland_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- struct phw_iceland_ulv_parm *ulv;
-
- ulv = &data->ulv;
- ulv->ch_ulv_parameter = PPICELAND_CGULVPARAMETER_DFLT;
- data->voting_rights_clients0 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = PPICELAND_VOTINGRIGHTSCLIENTS_DFLT7;
-
- data->static_screen_threshold_unit = PPICELAND_STATICSCREENTHRESHOLDUNIT_DFLT;
- data->static_screen_threshold = PPICELAND_STATICSCREENTHRESHOLD_DFLT;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ABM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_NonABMSupportInPPLib);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicACTiming);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMemoryTransition);
-
- iceland_initialize_power_tune_defaults(hwmgr);
-
- data->mclk_strobe_mode_threshold = 40000;
- data->mclk_stutter_mode_threshold = 30000;
- data->mclk_edc_enable_threshold = 40000;
- data->mclk_edc_wr_enable_threshold = 40000;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMCLS);
-
- data->pcie_gen_performance.max = PP_PCIEGen1;
- data->pcie_gen_performance.min = PP_PCIEGen3;
- data->pcie_gen_power_saving.max = PP_PCIEGen1;
- data->pcie_gen_power_saving.min = PP_PCIEGen3;
-
- data->pcie_lane_performance.max = 0;
- data->pcie_lane_performance.min = 16;
- data->pcie_lane_power_saving.max = 0;
- data->pcie_lane_power_saving.min = 16;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification);
-}
-
-static int iceland_get_evv_voltage(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
- uint16_t virtual_voltage_id;
- uint16_t vddc = 0;
- uint16_t i;
-
- /* the count indicates actual number of entries */
- data->vddc_leakage.count = 0;
- data->vddci_leakage.count = 0;
-
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
- pr_err("Iceland should always support EVV\n");
- return -EINVAL;
- }
-
- /* retrieve voltage for leakage ID (0xff01 + i) */
- for (i = 0; i < ICELAND_MAX_LEAKAGE_COUNT; i++) {
- virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
-
- PP_ASSERT_WITH_CODE((0 == atomctrl_get_voltage_evv(hwmgr, virtual_voltage_id, &vddc)),
- "Error retrieving EVV voltage value!\n", continue);
-
- if (vddc >= 2000)
- pr_warning("Invalid VDDC value!\n");
-
- if (vddc != 0 && vddc != virtual_voltage_id) {
- data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
- data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
- data->vddc_leakage.count++;
- }
- }
-
- return 0;
-}
-
-static void iceland_patch_with_vddc_leakage(struct pp_hwmgr *hwmgr,
- uint32_t *vddc)
-{
- iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- uint32_t leakage_index;
- struct phw_iceland_leakage_voltage *leakage_table = &data->vddc_leakage;
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 */
- for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
- /*
- * If this voltage matches a leakage voltage ID, patch
- * with actual leakage voltage.
- */
- if (leakage_table->leakage_id[leakage_index] == *vddc) {
- /*
-			 * Need to make sure vddc is less than 2V, or
-			 * else it could burn the ASIC.
- */
- if (leakage_table->actual_voltage[leakage_index] >= 2000)
- pr_warning("Invalid VDDC value!\n");
- *vddc = leakage_table->actual_voltage[leakage_index];
- /* we found leakage voltage */
- break;
- }
- }
-
- if (*vddc >= ATOM_VIRTUAL_VOLTAGE_ID0)
- pr_warning("Voltage value looks like a Leakage ID but it's not patched\n");
-}
-
-static void iceland_patch_with_vddci_leakage(struct pp_hwmgr *hwmgr,
- uint32_t *vddci)
-{
- iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- uint32_t leakage_index;
- struct phw_iceland_leakage_voltage *leakage_table = &data->vddci_leakage;
-
- /* search for leakage voltage ID 0xff01 ~ 0xff08 */
- for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
- /*
- * If this voltage matches a leakage voltage ID, patch
- * with actual leakage voltage.
- */
- if (leakage_table->leakage_id[leakage_index] == *vddci) {
- *vddci = leakage_table->actual_voltage[leakage_index];
- /* we found leakage voltage */
- break;
- }
- }
-
- if (*vddci >= ATOM_VIRTUAL_VOLTAGE_ID0)
- pr_warning("Voltage value looks like a Leakage ID but it's not patched\n");
-}
-
-static int iceland_patch_vddc(struct pp_hwmgr *hwmgr,
- struct phm_clock_voltage_dependency_table *tab)
-{
- uint16_t i;
-
- if (tab)
- for (i = 0; i < tab->count; i++)
- iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
-
- return 0;
-}
-
-static int iceland_patch_vddci(struct pp_hwmgr *hwmgr,
- struct phm_clock_voltage_dependency_table *tab)
-{
- uint16_t i;
-
- if (tab)
- for (i = 0; i < tab->count; i++)
- iceland_patch_with_vddci_leakage(hwmgr, &tab->entries[i].v);
-
- return 0;
-}
-
-static int iceland_patch_vce_vddc(struct pp_hwmgr *hwmgr,
- struct phm_vce_clock_voltage_dependency_table *tab)
-{
- uint16_t i;
-
- if (tab)
- for (i = 0; i < tab->count; i++)
- iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
-
- return 0;
-}
-
-static int iceland_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
- struct phm_uvd_clock_voltage_dependency_table *tab)
-{
- uint16_t i;
-
- if (tab)
- for (i = 0; i < tab->count; i++)
- iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
-
- return 0;
-}
-
-static int iceland_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
- struct phm_phase_shedding_limits_table *tab)
-{
- uint16_t i;
-
- if (tab)
- for (i = 0; i < tab->count; i++)
- iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].Voltage);
-
- return 0;
-}
-
-static int iceland_patch_samu_vddc(struct pp_hwmgr *hwmgr,
- struct phm_samu_clock_voltage_dependency_table *tab)
-{
- uint16_t i;
-
- if (tab)
- for (i = 0; i < tab->count; i++)
- iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
-
- return 0;
-}
-
-static int iceland_patch_acp_vddc(struct pp_hwmgr *hwmgr,
- struct phm_acp_clock_voltage_dependency_table *tab)
-{
- uint16_t i;
-
- if (tab)
- for (i = 0; i < tab->count; i++)
- iceland_patch_with_vddc_leakage(hwmgr, &tab->entries[i].v);
-
- return 0;
-}
-
-static int iceland_patch_limits_vddc(struct pp_hwmgr *hwmgr,
- struct phm_clock_and_voltage_limits *tab)
-{
- if (tab) {
- iceland_patch_with_vddc_leakage(hwmgr, (uint32_t *)&tab->vddc);
- iceland_patch_with_vddci_leakage(hwmgr, (uint32_t *)&tab->vddci);
- }
-
- return 0;
-}
-
-static int iceland_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
-{
- uint32_t i;
- uint32_t vddc;
-
- if (tab) {
- for (i = 0; i < tab->count; i++) {
- vddc = (uint32_t)(tab->entries[i].Vddc);
- iceland_patch_with_vddc_leakage(hwmgr, &vddc);
- tab->entries[i].Vddc = (uint16_t)vddc;
- }
- }
-
- return 0;
-}
-
-static int iceland_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
-{
- int tmp;
-
-	tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
-	if (tmp)
-		return -EINVAL;
-
-	tmp = iceland_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
-	if (tmp)
-		return -EINVAL;
-
- return 0;
-}
-
-static int iceland_set_private_var_based_on_pptable(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
- struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
- struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
-
- PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
- "VDDC dependency on SCLK table is missing. This table is mandatory\n", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
- "VDDC dependency on SCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
-
- PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
- "VDDC dependency on MCLK table is missing. This table is mandatory\n", return -EINVAL);
- PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
- "VDD dependency on MCLK table has to have is missing. This table is mandatory\n", return -EINVAL);
-
- data->min_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
- data->max_vddc_in_pp_table = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
-
- hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
- allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
- allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
- allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
-
- if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
- data->min_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
- data->max_vddci_in_pp_table = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
- }
-
- if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
- hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
-
- return 0;
-}
-
-static int iceland_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
-{
- uint32_t table_size;
- struct phm_clock_voltage_dependency_table *table_clk_vlt;
-
- hwmgr->dyn_state.mclk_sclk_ratio = 4;
- hwmgr->dyn_state.sclk_mclk_delta = 15000; /* 150 MHz */
- hwmgr->dyn_state.vddc_vddci_delta = 200; /* 200mV */
-
- /* initialize vddc_dep_on_dal_pwrl table */
- table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
- table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
-
- if (NULL == table_clk_vlt) {
- pr_err("[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
- return -ENOMEM;
- } else {
- table_clk_vlt->count = 4;
- table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
- table_clk_vlt->entries[0].v = 0;
- table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
- table_clk_vlt->entries[1].v = 720;
- table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
- table_clk_vlt->entries[2].v = 810;
- table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
- table_clk_vlt->entries[3].v = 900;
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
- }
-
- return 0;
-}
-
-/**
- * Initializes the Iceland (Volcanic Islands) hardware manager backend
- *
- * @param hwmgr the address of the powerplay hardware manager.
- * @return 0 on success; otherwise an appropriate error code.
- */
-static int iceland_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
-{
- int result = 0;
- SMU71_Discrete_DpmTable *table = NULL;
- iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
- bool stay_in_boot;
- struct phw_iceland_ulv_parm *ulv;
- struct cgs_system_info sys_info = {0};
-
- PP_ASSERT_WITH_CODE((NULL != hwmgr),
- "Invalid Parameter!", return -EINVAL;);
-
- data->dll_defaule_on = 0;
- data->sram_end = SMC_RAM_END;
-
- data->activity_target[0] = PPICELAND_TARGETACTIVITY_DFLT;
- data->activity_target[1] = PPICELAND_TARGETACTIVITY_DFLT;
- data->activity_target[2] = PPICELAND_TARGETACTIVITY_DFLT;
- data->activity_target[3] = PPICELAND_TARGETACTIVITY_DFLT;
- data->activity_target[4] = PPICELAND_TARGETACTIVITY_DFLT;
- data->activity_target[5] = PPICELAND_TARGETACTIVITY_DFLT;
- data->activity_target[6] = PPICELAND_TARGETACTIVITY_DFLT;
- data->activity_target[7] = PPICELAND_TARGETACTIVITY_DFLT;
-
- data->mclk_activity_target = PPICELAND_MCLK_TARGETACTIVITY_DFLT;
-
- data->sclk_dpm_key_disabled = 0;
- data->mclk_dpm_key_disabled = 0;
- data->pcie_dpm_key_disabled = 0;
- data->pcc_monitor_enabled = 0;
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UnTabledHardwareInterface);
-
- data->gpio_debug = 0;
- data->engine_clock_data = 0;
- data->memory_clock_data = 0;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleepAboveLow);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPatchPowerState);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- /* Initializes DPM default values. */
- iceland_initialize_dpm_defaults(hwmgr);
-
- /* Enable Platform EVV support. */
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EVV);
-
- /* Get leakage voltage based on leakage ID. */
- result = iceland_get_evv_voltage(hwmgr);
- if (result)
- goto failed;
-
- /**
- * Patch our voltage dependency table with actual leakage
- * voltage. We need to perform leakage translation before it's
- * used by other functions such as
-	 * iceland_set_private_var_based_on_pptable().
- */
- result = iceland_patch_dependency_tables_with_leakage(hwmgr);
- if (result)
- goto failed;
-
- /* Parse pptable data read from VBIOS. */
-	result = iceland_set_private_var_based_on_pptable(hwmgr);
- if (result)
- goto failed;
-
- /* ULV support */
- ulv = &(data->ulv);
- ulv->ulv_supported = 1;
-
-	/* Initialize Dynamic State Adjustment Rule Settings */
- result = iceland_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
- if (result) {
- pr_err("[ powerplay ] iceland_initializa_dynamic_state_adjustment_rule_settings failed!\n");
- goto failed;
- }
-
- data->voltage_control = ICELAND_VOLTAGE_CONTROL_NONE;
- data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_NONE;
- data->mvdd_control = ICELAND_VOLTAGE_CONTROL_NONE;
-
- /*
- * Hardcode thermal temperature settings for now, these will
- * be overwritten if a custom policy exists.
- */
- data->thermal_temp_setting.temperature_low = 99500;
- data->thermal_temp_setting.temperature_high = 100000;
- data->thermal_temp_setting.temperature_shutdown = 104000;
- data->uvd_enabled = false;
-
- table = &data->smc_state_table;
-
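-	/* Probe the optional VRHot and AC/DC switch GPIO pins; record each pin in the SMC table and set or clear the matching platform capability. */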
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
- &gpio_pin_assignment)) {
- table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- } else {
- table->VRHotGpio = ICELAND_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot);
- }
-
- if (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
- &gpio_pin_assignment)) {
- table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- } else {
- table->AcDcGpio = ICELAND_UNUSED_GPIO_PIN;
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition);
- }
-
- /*
-	 * If ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, the Peak
-	 * Current Control feature is enabled and we should program
-	 * the PCC HW register.
- */
- if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID,
- &gpio_pin_assignment)) {
- uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__SMC,
- ixCNB_PWRMGT_CNTL);
-
- switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
- case 0:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
- break;
- case 1:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
- break;
- case 2:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
- break;
- case 3:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
- break;
- case 4:
- temp_reg = PHM_SET_FIELD(temp_reg,
- CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
- break;
- default:
- pr_warning("[ powerplay ] Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!\n");
- break;
- }
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCNB_PWRMGT_CNTL, temp_reg);
- }
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableSMU7ThermalManagement);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SMU7);
-
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_GPIO_LUT))
- data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_SVID2))
- data->voltage_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI,
- VOLTAGE_OBJ_GPIO_LUT))
- data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_VDDCI,
- VOLTAGE_OBJ_SVID2))
- data->vdd_ci_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (data->vdd_ci_control == ICELAND_VOLTAGE_CONTROL_NONE)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC,
- VOLTAGE_OBJ_GPIO_LUT))
- data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
- VOLTAGE_TYPE_MVDDC,
- VOLTAGE_OBJ_SVID2))
- data->mvdd_control = ICELAND_VOLTAGE_CONTROL_BY_SVID2;
- }
-
- if (data->mvdd_control == ICELAND_VOLTAGE_CONTROL_NONE)
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnableMVDDControl);
-
- data->vddc_phase_shed_control = false;
-
- stay_in_boot = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StayInBootState);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPowerManagement);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ActivityReporting);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_GFXClockGatingSupport);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MemorySpreadSpectrumSupport);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EngineSpreadSpectrumSupport);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicPCIEGen2Support);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SMC);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisablePowerGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_BACO);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalAutoThrottling);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableLSClockGating);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuDPM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AcpDPM);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6inACSupport);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EnablePlatformPowerManagement);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PauseMMSessions);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PauseMMSessions);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_GFXClockGatingManagedInCAIL);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_IcelandULPSSWWorkAround);
-
- /* iceland doesn't support UVD and VCE */
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (!result) {
- if (sys_info.value & AMD_PG_SUPPORT_UVD)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
- if (sys_info.value & AMD_PG_SUPPORT_VCE)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
-
- data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
- ICELAND_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
- else
- data->pcie_gen_cap = (uint32_t)sys_info.value;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (result)
- data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
- else
- data->pcie_lane_cap = (uint32_t)sys_info.value;
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- iceland_hwmgr_backend_fini(hwmgr);
- }
-
- return 0;
-failed:
- return result;
-}
-
-static int iceland_get_num_of_entries(struct pp_hwmgr *hwmgr)
-{
- int result;
- unsigned long ret = 0;
-
- result = pp_tables_get_num_of_entries(hwmgr, &ret);
-
- return result ? 0 : ret;
-}
-
-static const unsigned long PhwIceland_Magic = (unsigned long)(PHM_VIslands_Magic);
-
-struct iceland_power_state *cast_phw_iceland_power_state(
- struct pp_hw_power_state *hw_ps)
-{
- if (hw_ps == NULL)
- return NULL;
-
- PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (struct iceland_power_state *)hw_ps;
-}
-
-static int iceland_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
- struct pp_power_state *prequest_ps,
- const struct pp_power_state *pcurrent_ps)
-{
- struct iceland_power_state *iceland_ps =
- cast_phw_iceland_power_state(&prequest_ps->hardware);
-
- uint32_t sclk;
- uint32_t mclk;
- struct PP_Clocks minimum_clocks = {0};
- bool disable_mclk_switching;
- bool disable_mclk_switching_for_frame_lock;
- struct cgs_display_info info = {0};
- const struct phm_clock_and_voltage_limits *max_limits;
- uint32_t i;
- iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
-
- int32_t count;
- int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
-
- data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
-
- PP_ASSERT_WITH_CODE(iceland_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",
- );
-
- max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
- &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
- &(hwmgr->dyn_state.max_clock_voltage_on_dc);
-
- if (PP_PowerSource_DC == hwmgr->power_source) {
- for (i = 0; i < iceland_ps->performance_level_count; i++) {
- if (iceland_ps->performance_levels[i].memory_clock > max_limits->mclk)
- iceland_ps->performance_levels[i].memory_clock = max_limits->mclk;
- if (iceland_ps->performance_levels[i].engine_clock > max_limits->sclk)
- iceland_ps->performance_levels[i].engine_clock = max_limits->sclk;
- }
- }
-
- iceland_ps->vce_clocks.EVCLK = hwmgr->vce_arbiter.evclk;
- iceland_ps->vce_clocks.ECCLK = hwmgr->vce_arbiter.ecclk;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
-
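-		/* For a stable P-state, target the largest SCLK dependency entry at or below 75% of the AC SCLK limit and pin MCLK to the AC maximum. */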
- max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
- stable_pstate_sclk = (max_limits->sclk * 75) / 100;
-
- for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1; count >= 0; count--) {
- if (stable_pstate_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
- stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
- break;
- }
- }
-
- if (count < 0)
- stable_pstate_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
-
- stable_pstate_mclk = max_limits->mclk;
-
- minimum_clocks.engineClock = stable_pstate_sclk;
- minimum_clocks.memoryClock = stable_pstate_mclk;
- }
-
- if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
- minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
-
- if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
- minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
-
- iceland_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
-
- if (0 != hwmgr->gfx_arbiter.sclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.engineClock),
- "Overdrive sclk exceeds limit",
- hwmgr->gfx_arbiter.sclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.engineClock);
-
- if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
- iceland_ps->performance_levels[1].engine_clock = hwmgr->gfx_arbiter.sclk_over_drive;
- }
-
- if (0 != hwmgr->gfx_arbiter.mclk_over_drive) {
- PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= hwmgr->platform_descriptor.overdriveLimit.memoryClock),
- "Overdrive mclk exceeds limit",
- hwmgr->gfx_arbiter.mclk_over_drive = hwmgr->platform_descriptor.overdriveLimit.memoryClock);
-
- if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
- iceland_ps->performance_levels[1].memory_clock = hwmgr->gfx_arbiter.mclk_over_drive;
- }
-
- disable_mclk_switching_for_frame_lock = phm_cap_enabled(
- hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
-
- disable_mclk_switching = (1 < info.display_count) ||
- disable_mclk_switching_for_frame_lock;
-
- sclk = iceland_ps->performance_levels[0].engine_clock;
- mclk = iceland_ps->performance_levels[0].memory_clock;
-
- if (disable_mclk_switching)
- mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count - 1].memory_clock;
-
- if (sclk < minimum_clocks.engineClock)
- sclk = (minimum_clocks.engineClock > max_limits->sclk) ? max_limits->sclk : minimum_clocks.engineClock;
-
- if (mclk < minimum_clocks.memoryClock)
- mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? max_limits->mclk : minimum_clocks.memoryClock;
-
- iceland_ps->performance_levels[0].engine_clock = sclk;
- iceland_ps->performance_levels[0].memory_clock = mclk;
-
- iceland_ps->performance_levels[1].engine_clock =
- (iceland_ps->performance_levels[1].engine_clock >= iceland_ps->performance_levels[0].engine_clock) ?
- iceland_ps->performance_levels[1].engine_clock :
- iceland_ps->performance_levels[0].engine_clock;
-
- if (disable_mclk_switching) {
- if (mclk < iceland_ps->performance_levels[1].memory_clock)
- mclk = iceland_ps->performance_levels[1].memory_clock;
-
- iceland_ps->performance_levels[0].memory_clock = mclk;
- iceland_ps->performance_levels[1].memory_clock = mclk;
- } else {
- if (iceland_ps->performance_levels[1].memory_clock < iceland_ps->performance_levels[0].memory_clock)
- iceland_ps->performance_levels[1].memory_clock = iceland_ps->performance_levels[0].memory_clock;
- }
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) {
-		for (i = 0; i < iceland_ps->performance_level_count; i++) {
- iceland_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
- iceland_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
- iceland_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
- iceland_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
- }
- }
-
- return 0;
-}
-
-static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
-{
- /*
- * We return the status of Voltage Control instead of checking SCLK/MCLK DPM
-	 * because we may have test scenarios that need us to intentionally disable SCLK/MCLK DPM,
-	 * whereas voltage control is a fundamental change that will not be disabled
- */
- return (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
- FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) ? 1 : 0);
-}
-
-/**
- * Force the SCLK DPM state
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param n : DPM level
- * @return 0 on success; 1 if the SMC message fails, -1 if DPM is not running.
- */
-int iceland_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
- "Trying to force SCLK when DPM is disabled", return -1;);
- if (0 == data->sclk_dpm_key_disabled)
- return (0 == smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_DPM_ForceState,
- n) ? 0 : 1);
-
- return 0;
-}
-
-/**
- * Force the MCLK DPM state
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param n : DPM level
- * @return 0 on success; 1 if the SMC message fails, -1 if DPM is not running.
- */
-int iceland_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
- PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
- "Trying to Force MCLK when DPM is disabled", return -1;);
- if (0 == data->mclk_dpm_key_disabled)
- return (0 == smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_ForceState,
- n) ? 0 : 1);
-
- return 0;
-}
-
-/**
- * Force the PCIE DPM level
- *
- * @param hwmgr: the address of the powerplay hardware manager.
- * @param n : DPM level
- * @return 0 on success; 1 if the SMC message fails, -1 if DPM is not running.
- */
-int iceland_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
- PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
- "Trying to Force PCIE level when DPM is disabled", return -1;);
- if (0 == data->pcie_dpm_key_disabled)
- return (0 == smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- n) ? 0 : 1);
-
- return 0;
-}
-
-static int iceland_force_dpm_highest(struct pp_hwmgr *hwmgr)
-{
- uint32_t level, tmp;
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- if (0 == data->sclk_dpm_key_disabled) {
- /* SCLK */
- if (data->dpm_level_enable_mask.sclk_dpm_enable_mask != 0) {
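-			/* The highest enabled DPM level is the index of the most significant set bit in the enable mask. */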
- level = 0;
- tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
-				level++;
-
- if (0 != level) {
- PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)),
- "force highest sclk dpm state failed!", return -1);
- PHM_WAIT_INDIRECT_FIELD(hwmgr->device,
- SMC_IND, TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX, level);
- }
- }
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
- /* MCLK */
- if (data->dpm_level_enable_mask.mclk_dpm_enable_mask != 0) {
- level = 0;
- tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
-				level++;
-
- if (0 != level) {
- PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_mclk(hwmgr, level)),
- "force highest mclk dpm state failed!", return -1);
- PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND,
- TARGET_AND_CURRENT_PROFILE_INDEX, CURR_MCLK_INDEX, level);
- }
- }
- }
-
- if (0 == data->pcie_dpm_key_disabled) {
- /* PCIE */
- if (data->dpm_level_enable_mask.pcie_dpm_enable_mask != 0) {
- level = 0;
- tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
-				level++;
-
- if (0 != level) {
- PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state_pcie(hwmgr, level)),
- "force highest pcie dpm state failed!", return -1);
- }
- }
- }
-
- return 0;
-}
-
-static uint32_t iceland_get_lowest_enable_level(struct pp_hwmgr *hwmgr,
- uint32_t level_mask)
-{
- uint32_t level = 0;
-
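-	/* Walk up from bit 0 until the first set bit; its index is the lowest enabled DPM level. */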
- while (0 == (level_mask & (1 << level)))
- level++;
-
- return level;
-}
-
-static int iceland_force_dpm_lowest(struct pp_hwmgr *hwmgr)
-{
- uint32_t level;
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- /* for now force only sclk */
- if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- level = iceland_get_lowest_enable_level(hwmgr,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
-
- PP_ASSERT_WITH_CODE((0 == iceland_dpm_force_state(hwmgr, level)),
- "force sclk dpm state failed!", return -1);
-
- PHM_WAIT_INDIRECT_FIELD(hwmgr->device, SMC_IND,
- TARGET_AND_CURRENT_PROFILE_INDEX,
- CURR_SCLK_INDEX,
- level);
- }
-
- return 0;
-}
-
-int iceland_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
-{
- iceland_hwmgr *data = (iceland_hwmgr *)(hwmgr->backend);
-
- PP_ASSERT_WITH_CODE (0 == iceland_is_dpm_running(hwmgr),
- "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
- return -1);
-
- if (0 == data->sclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
- hwmgr->smumgr,
- PPSMC_MSG_NoForcedLevel)),
- "unforce sclk dpm state failed!",
- return -1);
- }
-
- if (0 == data->mclk_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
- hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_NoForcedLevel)),
- "unforce mclk dpm state failed!",
- return -1);
- }
-
- if (0 == data->pcie_dpm_key_disabled) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
- hwmgr->smumgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel)),
- "unforce pcie level failed!",
- return -1);
- }
-
- return 0;
-}
-
-static int iceland_force_dpm_level(struct pp_hwmgr *hwmgr,
- enum amd_dpm_forced_level level)
-{
- int ret = 0;
-
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = iceland_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = iceland_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- break;
- case AMD_DPM_FORCED_LEVEL_AUTO:
- ret = iceland_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- break;
- default:
- break;
- }
-
- hwmgr->dpm_level = level;
- return ret;
-}
-
-const struct iceland_power_state *cast_const_phw_iceland_power_state(
- const struct pp_hw_power_state *hw_ps)
-{
- if (hw_ps == NULL)
- return NULL;
-
- PP_ASSERT_WITH_CODE((PhwIceland_Magic == hw_ps->magic),
- "Invalid Powerstate Type!",
- return NULL);
-
- return (const struct iceland_power_state *)hw_ps;
-}
-
-static int iceland_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- struct iceland_single_dpm_table *psclk_table = &(data->dpm_table.sclk_table);
- uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
- struct iceland_single_dpm_table *pmclk_table = &(data->dpm_table.mclk_table);
- uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
- struct PP_Clocks min_clocks = {0};
- uint32_t i;
- struct cgs_display_info info = {0};
-
- data->need_update_smu7_dpm_table = 0;
-
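-	/* Request an overdrive (OD) table update when the new sclk/mclk is not an existing DPM level; otherwise only flag a plain update when the deep-sleep minimum clock or the display count has changed. */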
- for (i = 0; i < psclk_table->count; i++) {
- if (sclk == psclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= psclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- else {
- /*
- * TODO: Check SCLK in DAL's minimum clocks in case DeepSleep
- * divider update is required.
- */
-		if (data->display_timing.min_clock_insr != min_clocks.engineClockInSR)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- for (i = 0; i < pmclk_table->count; i++) {
- if (mclk == pmclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= pmclk_table->count)
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- cgs_get_active_displays_info(hwmgr->device, &info);
-
- if (data->display_timing.num_existing_displays != info.display_count)
- data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-
- return 0;
-}
-
-static uint16_t iceland_get_maximum_link_speed(struct pp_hwmgr *hwmgr, const struct iceland_power_state *hw_ps)
-{
- uint32_t i;
- uint32_t pcie_speed, max_speed = 0;
-
- for (i = 0; i < hw_ps->performance_level_count; i++) {
- pcie_speed = hw_ps->performance_levels[i].pcie_gen;
- if (max_speed < pcie_speed)
- max_speed = pcie_speed;
- }
-
- return max_speed;
-}
-
-static uint16_t iceland_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
-{
- uint32_t speed_cntl = 0;
-
- speed_cntl = cgs_read_ind_register(hwmgr->device,
- CGS_IND_REG__PCIE,
- ixPCIE_LC_SPEED_CNTL);
- return((uint16_t)PHM_GET_FIELD(speed_cntl,
- PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
-}
-
-static int iceland_request_link_speed_change_before_state_change(struct pp_hwmgr *hwmgr, const void *input)
-{
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- const struct iceland_power_state *iceland_nps = cast_const_phw_iceland_power_state(states->pnew_state);
- const struct iceland_power_state *iceland_cps = cast_const_phw_iceland_power_state(states->pcurrent_state);
-
- uint16_t target_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_nps);
- uint16_t current_link_speed;
-
- if (data->force_pcie_gen == PP_PCIEGenInvalid)
- current_link_speed = iceland_get_maximum_link_speed(hwmgr, iceland_cps);
- else
- current_link_speed = data->force_pcie_gen;
-
- data->force_pcie_gen = PP_PCIEGenInvalid;
- data->pspp_notify_required = false;
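-	/* If a faster link is required, request the target gen through ACPI; on failure the switch falls through to try the next lower gen, finally recording the current speed as the forced gen. */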
- if (target_link_speed > current_link_speed) {
-		switch (target_link_speed) {
- case PP_PCIEGen3:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN3, false))
- break;
- data->force_pcie_gen = PP_PCIEGen2;
- if (current_link_speed == PP_PCIEGen2)
- break;
- case PP_PCIEGen2:
- if (0 == acpi_pcie_perf_request(hwmgr->device, PCIE_PERF_REQ_GEN2, false))
- break;
- default:
- data->force_pcie_gen = iceland_get_current_pcie_speed(hwmgr);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- data->pspp_notify_required = true;
- }
-
- return 0;
-}
-
-static int iceland_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
-{
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if ((0 == data->sclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(
- 0 == iceland_is_dpm_running(hwmgr),
- "Trying to freeze SCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
- "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- if ((0 == data->mclk_dpm_key_disabled) &&
- (data->need_update_smu7_dpm_table &
- DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(0 == iceland_is_dpm_running(hwmgr),
- "Trying to freeze MCLK DPM when DPM is disabled",
- );
- PP_ASSERT_WITH_CODE(
- 0 == smum_send_msg_to_smc(hwmgr->smumgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
- "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
- return -1);
- }
-
- return 0;
-}
-
-static int iceland_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr, const void *input)
-{
- int result = 0;
-
- const struct phm_set_power_state_input *states = (const struct phm_set_power_state_input *)input;
- const struct iceland_power_state *iceland_ps = cast_const_phw_iceland_power_state(states->pnew_state);
- struct iceland_hwmgr *data = (struct iceland_hwmgr *)(hwmgr->backend);
- uint32_t sclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].engine_clock;
- uint32_t mclk = iceland_ps->performance_levels[iceland_ps->performance_level_count-1].memory_clock;
- struct iceland_dpm_table *pdpm_table = &data->dpm_table;
-
- struct iceland_dpm_table *pgolden_dpm_table = &data->golden_dpm_table;
- uint32_t dpm_count, clock_percent;
- uint32_t i;
-
- if (0 == data->need_update_smu7_dpm_table)
- return 0;
-
- if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
- pdpm_table->sclk_table.dpm_levels[pdpm_table->sclk_table.count-1].value = sclk;
-
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->pla