summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHarry Wentland <harry.wentland@amd.com>2015-07-31 22:26:20 -0400
committerAlex Deucher <alexander.deucher@amd.com>2015-09-21 17:45:16 -0400
commit7334f3cc72042b5bd1ff91571e8b1f4ab23eedca (patch)
tree994077756e60a46fc898b3f36b168c8a78f725c9
parentb4f181b117dbc578a37500e18a4533335559e9d5 (diff)
drm/amdgpu: Add DM (Display Manager)DAL-wip
This is the interface we use to bridge the drm modesetting APIs with the internal DAL APIs. The idea behind this is to allow us to separate the hw abstractions and programming sequences and the high level APIs. It also allows us to easily change internal APIs without affecting the higher level APIs and allows us to easily support changes to the high level APIs such as adding plane support and transitioning to the atomic modesetting API. Signed-off-by: Harry Wentland <harry.wentland@amd.com>
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c85
-rw-r--r--drivers/gpu/drm/amd/dal/Makefile7
-rw-r--r--drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile4
-rw-r--r--drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c232
-rw-r--r--drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c1093
-rw-r--r--drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h172
-rw-r--r--drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c731
-rw-r--r--drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h122
-rw-r--r--drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c1575
-rw-r--r--drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h69
16 files changed, 4151 insertions, 11 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 00804b482eb0..ac7b797ae9ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -2,7 +2,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-FULL_AMD_PATH=drivers/gpu/drm/amd
+FULL_AMD_PATH=$(src)/..
DAL_FOLDER_NAME=dal
FULL_AMD_DAL_PATH = $(FULL_AMD_PATH)/$(DAL_FOLDER_NAME)
@@ -107,6 +107,7 @@ RELATIVE_AMD_DAL_PATH = ../$(DAL_FOLDER_NAME)
include $(FULL_AMD_DAL_PATH)/Makefile
amdgpu-y += $(AMD_DAL_FILES)
+
endif
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 57b427f958da..8e4af1667e32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -52,6 +52,7 @@
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
+#include "amdgpu_dm.h"
#include "gpu_scheduler.h"
@@ -79,6 +80,7 @@ extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
+extern int amdgpu_dal;
extern int amdgpu_enable_scheduler;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
@@ -2038,6 +2040,7 @@ struct amdgpu_device {
/* display */
struct amdgpu_mode_info mode_info;
+ /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src pageflip_irq;
@@ -2083,6 +2086,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* display related functionality */
+ struct amdgpu_display_manager dm;
+
const struct amdgpu_ip_block_version *ip_blocks;
int num_ip_blocks;
struct amdgpu_ip_block_status *ip_block_status;
@@ -2117,7 +2123,7 @@ void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-
+bool amdgpu_device_has_dal_support(struct amdgpu_device *adev);
/*
* Cast helper
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2d569eccf41f..5dfd5c7d2805 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1356,6 +1356,28 @@ static int amdgpu_resume(struct amdgpu_device *adev)
return 0;
}
+
+/**
+ * amdgpu_device_has_dal_support - check if dal is supported
+ *
+ * @adev: amdgpu_device_pointer
+ *
+ * Returns true for supported, false for not supported
+ */
+bool amdgpu_device_has_dal_support(struct amdgpu_device *adev)
+{
+
+	switch (adev->asic_type) {
+	case CHIP_CARRIZO:
+#if defined(CONFIG_DRM_AMD_DAL) && defined(CONFIG_DRM_AMD_DAL_DCE11_0)
+		return true;
+#endif
+		/* fall through when DAL/DCE11.0 support is not built in */
+	default:
+		return false;
+	}
+}
+
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -1500,8 +1522,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_get_clock_info(adev);
if (r)
return r;
+
/* init i2c buses */
- amdgpu_atombios_i2c_init(adev);
+	if (!amdgpu_dal || !amdgpu_device_has_dal_support(adev))
+		amdgpu_atombios_i2c_init(adev);
/* Fence driver */
r = amdgpu_fence_driver_init(adev);
@@ -1602,7 +1626,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
- amdgpu_i2c_fini(adev);
+	if (!amdgpu_dal || !amdgpu_device_has_dal_support(adev))
+		amdgpu_i2c_fini(adev);
amdgpu_atombios_fini(adev);
kfree(adev->bios);
adev->bios = NULL;
@@ -1746,7 +1771,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
/* blat the mode back in */
if (fbcon) {
- drm_helper_resume_force_mode(dev);
+	if (!amdgpu_dal || !amdgpu_device_has_dal_support(adev)) {
+ /* pre DCE11 */
+ drm_helper_resume_force_mode(dev);
+ }
+
/* turn on display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 6bb1c9c5b6f9..7e8722c9652a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -76,6 +76,7 @@ int amdgpu_deep_color = 0;
int amdgpu_vm_size = 8;
int amdgpu_vm_block_size = -1;
int amdgpu_exp_hw_support = 0;
+int amdgpu_dal = 1;
int amdgpu_enable_scheduler = 0;
int amdgpu_sched_jobs = 16;
int amdgpu_sched_hw_submission = 2;
@@ -144,6 +145,9 @@ module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+MODULE_PARM_DESC(dal, "DAL display driver (1 = enable (default), 0 = disable)");
+module_param_named(dal, amdgpu_dal, int, 0444);
+
MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable, 0 = disable ((default))");
module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0aba8e9bc8a0..ecc3e12676d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -35,6 +35,10 @@
#include <linux/pm_runtime.h>
+#ifdef CONFIG_DRM_AMD_DAL
+#include "amdgpu_dm_irq.h"
+#endif
+
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
/*
@@ -229,7 +233,17 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
}
}
- INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+
+	/* Both the pre-DCE11 and the DAL paths currently install the same
+	 * handler, so a conditional here would have identical branches;
+	 * keep a single INIT_WORK until DAL provides its own handler. */
+	INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
+
+
INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
adev->irq.installed = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 22367939ebf1..ea9d9584f364 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -677,6 +677,12 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
/* Get associated drm_crtc: */
drmcrtc = &adev->mode_info.crtcs[crtc]->base;
+	if (!adev->mode_info.crtcs[crtc]) {
+		/* This can occur on driver load if some component fails to
+		 * initialize completely and driver is unloaded.
+		 * Note: must test crtcs[crtc] itself; drmcrtc above is
+		 * &crtcs[crtc]->base and is never NULL. */
+		DRM_ERROR("Uninitialized crtc %d\n", crtc);
+		return -EINVAL;
+	}
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index b55ceb14fdcd..3573eac99b19 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -71,6 +71,7 @@
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
+#include "amdgpu_dm.h"
/*
* Indirect registers accessor
@@ -1300,6 +1301,80 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] =
},
};
+/*
+ * This is temporary. After we've gone through full testing with
+ * DAL we want to remove dce_v11
+ */
+#if defined(CONFIG_DRM_AMD_DAL)
+static const struct amdgpu_ip_block_version cz_ip_blocks_dal[] =
+{
+ /* ORDER MATTERS! */
+ {
+ .type = AMD_IP_BLOCK_TYPE_COMMON,
+ .major = 2,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vi_common_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gmc_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_IH,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_ih_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &cz_dpm_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_GFX,
+ .major = 8,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &gfx_v8_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_SDMA,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &sdma_v3_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_UVD,
+ .major = 6,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &uvd_v6_0_ip_funcs,
+ },
+ {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 3,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v3_0_ip_funcs,
+ },
+};
+#endif
+
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
@@ -1316,8 +1391,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
break;
case CHIP_CARRIZO:
+#if defined(CONFIG_DRM_AMD_DAL)
+ if (amdgpu_dal && amdgpu_device_has_dal_support(adev)) {
+ adev->ip_blocks = cz_ip_blocks_dal;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks_dal);
+ } else {
+ adev->ip_blocks = cz_ip_blocks;
+ adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+ }
+#else
adev->ip_blocks = cz_ip_blocks;
adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
+#endif
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/dal/Makefile b/drivers/gpu/drm/amd/dal/Makefile
index d99e75f9b9ca..a1e12fc69633 100644
--- a/drivers/gpu/drm/amd/dal/Makefile
+++ b/drivers/gpu/drm/amd/dal/Makefile
@@ -7,10 +7,9 @@ AMDDALPATH = $(RELATIVE_AMD_DAL_PATH)
subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include -DDAL_CZ_BRINGUP
-DAL_LIBS = adapter amdgpu_dm audio asic_capability basics bios connector \
- controller dcs display_path display_service encoder gpio gpu \
- hw_sequencer i2caux interface link_service mode_manager timing_service \
- topology irq
+DAL_LIBS = adapter amdgpu_dm audio asic_capability basics bios connector controller dcs \
+ display_path display_service encoder gpio gpu hw_sequencer i2caux \
+ interface link_service mode_manager timing_service topology irq
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DAL_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile
index 1709ec8dfb33..8fa9eaec0bfc 100644
--- a/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/Makefile
@@ -2,7 +2,9 @@
# Makefile for the 'dm' sub-component of DAL.
# It provides the control and status of dm blocks.
-AMDGPUDM = amdgpu_dal_services.o
+
+
+AMDGPUDM = amdgpu_dal_services.o amdgpu_dm_types.o amdgpu_dm.o amdgpu_dm_irq.o
AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM))
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c
index 9a4d38161412..571653cc3ac3 100644
--- a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dal_services.c
@@ -32,6 +32,238 @@
#include "amdgpu.h"
#include "dal_services.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "include/dal_interface.h"
+
+/*
+#include "logger_interface.h"
+#include "acpimethod_atif.h"
+#include "amdgpu_powerplay.h"
+#include "amdgpu_notifications.h"
+*/
+
+/******************************************************************************
+ * IRQ Interfaces.
+ *****************************************************************************/
+
+void dal_register_timer_interrupt(
+ struct dal_context *context,
+ struct dal_timer_interrupt_params *int_params,
+ interrupt_handler ih,
+ void *args)
+{
+ struct amdgpu_device *adev = context->driver_context;
+
+ if (!adev || !int_params) {
+ DRM_ERROR("DM_IRQ: invalid input!\n");
+ return;
+ }
+
+ if (int_params->int_context != INTERRUPT_LOW_IRQ_CONTEXT) {
+ /* only low irq ctx is supported. */
+ DRM_ERROR("DM_IRQ: invalid context: %d!\n",
+ int_params->int_context);
+ return;
+ }
+
+ amdgpu_dm_irq_register_timer(adev, int_params, ih, args);
+}
+
+irq_handler_idx dal_register_interrupt(
+ struct dal_context *context,
+ struct dal_interrupt_params *int_params,
+ interrupt_handler ih,
+ void *handler_args)
+{
+ struct amdgpu_device *adev = context->driver_context;
+
+ if (NULL == int_params || NULL == ih) {
+ DRM_ERROR("DM_IRQ: invalid input!\n");
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+ }
+
+ if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
+ DRM_ERROR("DM_IRQ: invalid context: %d!\n",
+ int_params->int_context);
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+ }
+
+ if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
+ DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
+ int_params->irq_source);
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+ }
+
+ return amdgpu_dm_irq_register_interrupt(adev, int_params, ih,
+ handler_args);
+}
+
+void dal_unregister_interrupt(
+ struct dal_context *context,
+ enum dal_irq_source irq_source,
+ irq_handler_idx handler_idx)
+{
+ struct amdgpu_device *adev = context->driver_context;
+
+ if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
+ DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
+ return;
+ }
+
+ if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
+ DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
+ return;
+ }
+
+ amdgpu_dm_irq_unregister_interrupt(adev, irq_source, handler_idx);
+}
+
+
+void dal_isr_acquire_lock(struct dal_context *context)
+{
+ /*TODO*/
+}
+
+void dal_isr_release_lock(struct dal_context *context)
+{
+ /*TODO*/
+}
+
+/******************************************************************************
+ * End-of-IRQ Interfaces.
+ *****************************************************************************/
+
+bool dal_get_platform_info(struct dal_context *dal_context,
+ struct platform_info_params *params)
+{
+ /*TODO*/
+ return false;
+}
+
+/* Next calls are to power component */
+bool dal_pp_pre_dce_clock_change(struct dal_context *ctx,
+ struct dal_to_power_info *input,
+ struct power_to_dal_info *output)
+{
+ /*TODO*/
+ return false;
+}
+
+bool dal_pp_post_dce_clock_change(struct dal_context *ctx)
+{
+ /*TODO*/
+ return false;
+}
+
+bool dal_get_system_clocks_range(struct dal_context *ctx,
+ struct dal_system_clock_range *sys_clks)
+{
+ /*TODO*/
+ return false;
+}
+
+
+bool dal_pp_set_display_clock(struct dal_context *ctx,
+ struct dal_to_power_dclk *dclk)
+{
+ /* TODO: need power component to provide appropriate interface */
+ return false;
+}
+
+/* end of calls to power component */
+
+/* Calls to notification */
+
+/* dal_notify_hotplug
+ *
+ * Notify display manager for hotplug event
+ *
+ * @param
+ * struct dal_context *dal_context - [in] pointer to specific DAL context
+ *
+ * @return
+ * void
+ * */
+void dal_notify_hotplug(
+ struct dal_context *ctx,
+ uint32_t display_index,
+ bool is_connected)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector = NULL;
+ struct amdgpu_connector *aconnector = NULL;
+
+ /* 1. Update status of drm connectors
+ * 2. Send a uevent and let userspace tell us what to do */
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_connector(connector);
+
+ /*aconnector->connector_id means display_index*/
+ if (aconnector->connector_id != display_index)
+ continue;
+
+ if (is_connected) {
+ drm_mode_connector_update_edid_property(
+ connector,
+ (struct edid *)
+ dal_get_display_edid(
+ adev->dm.dal,
+ display_index,
+ NULL));
+ } else
+ drm_mode_connector_update_edid_property(
+ connector, NULL);
+
+ break;
+ }
+
+ drm_helper_hpd_irq_event(dev);
+}
+
+void dal_notify_capability_change(
+ struct dal_context *ctx,
+ uint32_t display_index)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct drm_device *dev = adev->ddev;
+ struct drm_connector *connector = NULL;
+ struct amdgpu_connector *aconnector = NULL;
+
+ /* 1. Update status of drm connectors
+ * 2. Send a uevent and let userspace tell us what to do */
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+ aconnector = to_amdgpu_connector(connector);
+
+ /*aconnector->connector_id means display_index*/
+ if (aconnector->connector_id == display_index) {
+ drm_mode_connector_update_edid_property(
+ connector,
+ (struct edid *)
+ dal_get_display_edid(
+ adev->dm.dal,
+ display_index,
+ NULL));
+ }
+ }
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+void dal_notify_setmode_complete(struct dal_context *ctx,
+ uint32_t h_total,
+ uint32_t v_total,
+ uint32_t h_active,
+ uint32_t v_active,
+ uint32_t pix_clk_in_khz)
+{
+ /*TODO*/
+}
+/* End of calls to notification */
long dal_get_pid(void)
{
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c
new file mode 100644
index 000000000000..55698b533bad
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.c
@@ -0,0 +1,1093 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services_types.h"
+#include "include/dal_interface.h"
+#include "include/mode_query_interface.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_types.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+
+#include "oss/oss_3_0_d.h"
+#include "oss/oss_3_0_sh_mask.h"
+#include "gmc/gmc_8_1_d.h"
+#include "gmc/gmc_8_1_sh_mask.h"
+
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+
+/* Define variables here
+ * These values will be passed to DAL for feature enable purpose
+ * Disable ALL for HDMI light up
+ * TODO: follow up if need this mechanism*/
+static struct dal_override_parameters display_param = {
+ .bool_param_enable_mask = 0,
+ .bool_param_values = 0,
+ .int_param_values[DAL_PARAM_MAX_COFUNC_NON_DP_DISPLAYS] = DAL_PARAM_INVALID_INT,
+ .int_param_values[DAL_PARAM_DRR_SUPPORT] = DAL_PARAM_INVALID_INT,
+};
+
+/* Debug facilities */
+#define AMDGPU_DM_NOT_IMPL(fmt, ...) \
+ DRM_INFO("DM_NOT_IMPL: " fmt, ##__VA_ARGS__)
+
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+ else
+ return dal_get_vblank_counter(adev->dm.dal, crtc);
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+
+ dal_get_crtc_scanoutpos(adev->dm.dal, crtc, vbl, position);
+
+ return 0;
+}
+
+static u32 dm_hpd_get_gpio_reg(struct amdgpu_device *adev)
+{
+ return mmDC_GPIO_HPD_A;
+}
+
+
+static bool dm_is_display_hung(struct amdgpu_device *adev)
+{
+ u32 crtc_hung = 0;
+ u32 i, j, tmp;
+
+ crtc_hung = dal_get_connected_targets_vector(adev->dm.dal);
+
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ int32_t vpos1, hpos1;
+ int32_t vpos2, hpos2;
+
+ tmp = dal_get_crtc_scanoutpos(
+ adev->dm.dal,
+ i,
+ &vpos1,
+ &hpos1);
+ udelay(10);
+ tmp = dal_get_crtc_scanoutpos(
+ adev->dm.dal,
+ i,
+ &vpos2,
+ &hpos2);
+
+ if (hpos1 != hpos2 && vpos1 != vpos2)
+ crtc_hung &= ~(1 << i);
+ }
+ }
+
+ if (crtc_hung == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static void dm_stop_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+}
+
+static void dm_resume_mc_access(struct amdgpu_device *adev,
+ struct amdgpu_mode_mc_save *save)
+{
+}
+
+static bool dm_is_idle(void *handle)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static void dm_print_status(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ dev_info(adev->dev, "DCE 10.x registers\n");
+ /* XXX todo */
+}
+
+static int dm_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 srbm_soft_reset = 0, tmp;
+
+ if (dm_is_display_hung(adev))
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+ if (srbm_soft_reset) {
+ dm_print_status(adev);
+
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ dm_print_status(adev);
+ }
+ return 0;
+}
+
+static void amdgpu_dm_pflip_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ enum dal_irq_source src = irq_params->irq_src;
+ unsigned long flags;
+ uint32_t display_index =
+ dal_get_display_index_from_int_src(adev->dm.dal, src);
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[display_index];
+ struct amdgpu_flip_work *works;
+
+ /* IRQ could occur when in initial stage */
+ if(amdgpu_crtc == NULL)
+ return;
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ works = amdgpu_crtc->pflip_works;
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
+ "AMDGPU_FLIP_SUBMITTED(%d)\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ return;
+ }
+
+ /* page flip completed. clean up */
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ amdgpu_crtc->pflip_works = NULL;
+
+	/* wake up userspace */
+ if(works->event)
+ drm_send_vblank_event(
+ adev->ddev,
+ amdgpu_crtc->crtc_id,
+ works->event);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+
+ drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
+ amdgpu_irq_put(adev, &adev->pageflip_irq, amdgpu_crtc->crtc_id);
+ queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
+}
+
+static void amdgpu_dm_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ enum dal_irq_source src = irq_params->irq_src;
+
+ uint32_t display_index =
+ dal_get_display_index_from_int_src(adev->dm.dal, src);
+
+ drm_handle_vblank(adev->ddev, display_index);
+}
+
+static void hpd_low_irq_helper_func(
+ void *param,
+ const struct path_mode *pm)
+{
+ uint32_t *display_index = param;
+
+ *display_index = pm->display_path_index;
+}
+
+static inline struct amdgpu_connector *find_connector_by_display_index(
+ struct drm_device *dev,
+ uint32_t display_index)
+{
+ struct drm_connector *connector = NULL;
+ struct amdgpu_connector *aconnector = NULL;
+
+ list_for_each_entry(
+ connector,
+ &dev->mode_config.connector_list,
+ head) {
+ aconnector = to_amdgpu_connector(connector);
+
+ /*aconnector->connector_id means display_index*/
+ if (aconnector->connector_id == display_index)
+ break;
+ }
+
+ return aconnector;
+}
+
+static void amdgpu_dm_hpd_low_irq(void *interrupt_params)
+{
+ struct amdgpu_device *adev = interrupt_params;
+ struct dal *dal = adev->dm.dal;
+ struct drm_device *dev = adev->ddev;
+ uint32_t connected_displays;
+ struct amdgpu_connector *aconnector = NULL;
+ bool trigger_drm_hpd_event = false;
+
+ /* This function runs after dal_notify_hotplug().
+ * That means the user-mode may already called DAL with a Set/Reset
+ * mode, that means this function must acquire the dal_mutex
+ * *before* calling into DAL.
+ * The vice-versa sequence may also happen - this function is
+ * calling into DAL and preempted by a call from user-mode. */
+ mutex_lock(&adev->dm.dal_mutex);
+
+ connected_displays = dal_get_connected_targets_vector(dal);
+
+ if (connected_displays == 0) {
+ uint32_t display_index = INVALID_DISPLAY_INDEX;
+
+ dal_pin_active_path_modes(
+ dal,
+ &display_index,
+ INVALID_DISPLAY_INDEX,
+ hpd_low_irq_helper_func);
+
+ mutex_unlock(&adev->dm.dal_mutex);
+
+ adev->dm.fake_display_index = display_index;
+
+ aconnector =
+ find_connector_by_display_index(dev, display_index);
+
+ if (!aconnector)
+ return;
+
+ /*
+ * force connected status on fake display connector
+ */
+ aconnector->base.status = connector_status_connected;
+
+ /* we need to force user-space notification on changed modes */
+ trigger_drm_hpd_event = true;
+
+ } else if (adev->dm.fake_display_index != INVALID_DISPLAY_INDEX) {
+ /* we assume only one display is connected */
+ uint32_t connected_display_index = 0;
+ struct drm_crtc *crtc;
+
+ mutex_unlock(&adev->dm.dal_mutex);
+
+ /* identify first connected display index */
+ while (connected_displays) {
+ if (1 & connected_displays)
+ break;
+
+ ++connected_display_index;
+ connected_displays >>= 1;
+ }
+
+ aconnector =
+ find_connector_by_display_index(
+ dev,
+ adev->dm.fake_display_index);
+
+ if (!aconnector)
+ return;
+
+ /*
+ * if there is display on another connector get connected
+ * we need to clean-up connection status on fake display
+ */
+ if (connected_display_index != adev->dm.fake_display_index) {
+ /* reset connected status on fake display connector */
+ aconnector->base.status = connector_status_disconnected;
+ } else {
+ crtc = aconnector->base.encoder->crtc;
+
+ DRM_DEBUG_KMS("Setting connector DPMS state to off\n");
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d] set DPMS off\n",
+ aconnector->base.base.id);
+ aconnector->base.funcs->dpms(
+ &aconnector->base, DRM_MODE_DPMS_OFF);
+
+ amdgpu_dm_mode_reset(crtc);
+
+ /*
+ * as mode reset is done for fake display, we should
+ * unreference drm fb and assign NULL pointer to the
+ * primary drm frame, so we will receive full set mode
+ * sequence later
+ */
+
+ drm_framebuffer_unreference(crtc->primary->fb);
+
+ crtc->primary->fb = NULL;
+ }
+
+ adev->dm.fake_display_index = INVALID_DISPLAY_INDEX;
+
+ trigger_drm_hpd_event = true;
+ } else
+ mutex_unlock(&adev->dm.dal_mutex);
+
+ if (true == trigger_drm_hpd_event)
+ drm_kms_helper_hotplug_event(dev);
+}
+
+static int dm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void *handle);
+
+
+
+/* Init display KMS
+ *
+ * Returns 0 on success
+ */
+int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+ struct dal_init_data init_data;
+ struct drm_device *ddev = adev->ddev;
+ adev->dm.ddev = adev->ddev;
+ adev->dm.adev = adev;
+ adev->dm.fake_display_index = INVALID_DISPLAY_INDEX;
+
+ /* Zero all the fields */
+ memset(&init_data, 0, sizeof(init_data));
+
+
+ /* initialize DAL's lock (for SYNC context use) */
+ spin_lock_init(&adev->dm.dal_lock);
+
+ /* initialize DAL's mutex */
+ mutex_init(&adev->dm.dal_mutex);
+
+ if(amdgpu_dm_irq_init(adev)) {
+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ goto error;
+ }
+
+ if (ddev->pdev) {
+ init_data.bdf_info.DEVICE_NUMBER = PCI_SLOT(ddev->pdev->devfn);
+ init_data.bdf_info.FUNCTION_NUMBER =
+ PCI_FUNC(ddev->pdev->devfn);
+ if (ddev->pdev->bus)
+ init_data.bdf_info.BUS_NUMBER = ddev->pdev->bus->number;
+ }
+
+ init_data.display_param = display_param;
+
+ init_data.asic_id.chip_family = adev->family;
+
+ init_data.asic_id.chip_id = adev->rev_id;
+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+
+ init_data.asic_id.vram_width = adev->mc.vram_width;
+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
+ init_data.asic_id.atombios_base_address =
+ adev->mode_info.atom_context->bios;
+ init_data.asic_id.runtime_flags.bits.SKIP_POWER_DOWN_ON_RESUME = 1;
+
+ if (adev->asic_type == CHIP_CARRIZO)
+ init_data.asic_id.runtime_flags.bits.GNB_WAKEUP_SUPPORTED = 1;
+
+ init_data.driver = adev;
+
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ goto error;
+ }
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+ adev->dm.dal = NULL;
+
+ /* enable gpu scaling in DAL */
+ init_data.display_param.bool_param_enable_mask |=
+ 1 << DAL_PARAM_ENABLE_GPU_SCALING;
+ init_data.display_param.bool_param_values |=
+ 1 << DAL_PARAM_ENABLE_GPU_SCALING;
+
+ adev->dm.dal = dal_create(&init_data);
+
+ if (!adev->dm.dal) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize hw for display support.\n");
+ /* Do not fail and cleanup, try to run without display */
+ }
+
+ if (amdgpu_dm_initialize_drm_device(&adev->dm)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+ /* TODO: Add_display_info? */
+
+ /* TODO use dynamic cursor width */
+ adev->ddev->mode_config.cursor_width = 128;
+ adev->ddev->mode_config.cursor_height = 128;
+
+ if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ DRM_INFO("KMS initialized.\n");
+
+ return 0;
+error:
+ amdgpu_dm_fini(adev);
+
+ return -1;
+}
+
+/* Tear down the display manager. This is also the amdgpu_dm_init() error
+ * path, so it must tolerate partially-initialized state. */
+void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+ /*
+ * TODO: pageflip, vblank interrupt
+ *
+ * amdgpu_dm_destroy_drm_device(&adev->dm);
+ * amdgpu_dm_irq_fini(adev);
+ */
+
+ /* Release the CGS device created by amdgpu_dm_init(); guarded because
+ * the error path can get here before it was created. */
+ if (adev->dm.cgs_device) {
+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+ adev->dm.cgs_device = NULL;
+ }
+
+ /* NOTE(review): adev->dm.dal may still be NULL here (init failure
+ * before dal_create()) - presumably dal_destroy() handles a NULL
+ * instance; confirm against the DAL implementation. */
+ dal_destroy(&adev->dm.dal);
+ return;
+}
+
+/* moved from amdgpu_dm_kms.c */
+/* Empty stub kept for the KMS teardown path. Declared with (void) to
+ * match the prototype in amdgpu_dm.h; an empty () parameter list is an
+ * old-style (unprototyped) declaration in C. */
+void amdgpu_dm_destroy(void)
+{
+}
+
+/*
+ * amdgpu_dm_get_vblank_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int disp_idx - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+/* Thin wrapper: disp_idx is a DAL display index, forwarded unchanged. */
+u32 amdgpu_dm_get_vblank_counter(struct amdgpu_device *adev, int disp_idx)
+{
+ return dal_get_vblank_counter(adev->dm.dal, disp_idx);
+}
+
+/* IP-block sw_init hook - nothing to do; all DM setup happens in hw_init. */
+static int dm_sw_init(void *handle)
+{
+ return 0;
+}
+
+/* IP-block sw_fini hook - nothing to do; DM teardown happens in hw_fini. */
+static int dm_sw_fini(void *handle)
+{
+ return 0;
+}
+
+/* IP-block hw_init hook: bring up the DAL display manager, then HPD.
+ * Returns 0 on success or the amdgpu_dm_init() error code. */
+static int dm_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ /* Create DAL display manager */
+ r = amdgpu_dm_init(adev);
+
+ /* Propagate the failure instead of ignoring it: amdgpu_dm_init()
+ * already cleaned up via amdgpu_dm_fini() on its error path, so
+ * running HPD init against the torn-down DM would be wrong. */
+ if (r)
+ return r;
+
+ amdgpu_dm_hpd_init(adev);
+
+ return 0;
+}
+
+/* IP-block hw_fini hook: disable HPD and release DM IRQ/timer resources.
+ * NOTE(review): amdgpu_dm_fini() is not called here, so the DAL instance
+ * and CGS device created in dm_hw_init() outlive this hook - confirm
+ * where they are released. */
+static int dm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_hpd_fini(adev);
+
+ amdgpu_dm_irq_fini(adev);
+
+ return 0;
+}
+
+/* IP-block suspend hook: put the DAL hardware into D3 and park the DM
+ * interrupt handling so no callbacks run while suspended. */
+static int dm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ dal_set_power_state(
+ dm->dal,
+ DAL_ACPI_CM_POWER_STATE_D3,
+ DAL_VIDEO_POWER_SUSPEND);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ return 0;
+}
+
+/* IP-block resume hook: power DAL back to D0, re-enable DM interrupts and
+ * force a path-mode reset on every display that was connected before
+ * suspend, so the hardware is reprogrammed from scratch. */
+static int dm_resume(void *handle)
+{
+ uint32_t connected_displays_vector;
+ uint32_t prev_connected_displays_vector;
+ uint32_t supported_disp = 0; /* vector of supported displays */
+ uint32_t displays_number;
+ uint32_t current_display_index;
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ uint32_t displays_vector[MAX_COFUNC_PATH];
+
+ dal_set_power_state(
+ dm->dal,
+ DAL_ACPI_CM_POWER_STATE_D0,
+ DAL_VIDEO_POWER_ON);
+
+ prev_connected_displays_vector =
+ dal_get_connected_targets_vector(dm->dal);
+ supported_disp = dal_get_supported_displays_vector(dm->dal);
+
+ /* save previous connected display to reset mode correctly */
+ connected_displays_vector = prev_connected_displays_vector;
+
+ amdgpu_dm_irq_resume(adev);
+
+ dal_resume(dm->dal);
+
+ /* Collect the index of each connected display (bit N of the vector
+ * maps to display index N) and clear its cached DRM mode so the
+ * reset below actually takes effect.
+ * NOTE(review): assumes at most MAX_COFUNC_PATH bits are set in
+ * connected_displays_vector - confirm DAL guarantees this bound. */
+ for (displays_number = 0, current_display_index = 0;
+ connected_displays_vector != 0;
+ connected_displays_vector >>= 1,
+ current_display_index++) {
+ if ((connected_displays_vector & 1) == 1) {
+ struct amdgpu_crtc *crtc =
+ adev->mode_info.crtcs[current_display_index];
+
+ displays_vector[displays_number] =
+ current_display_index;
+
+ memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
+
+ ++displays_number;
+ }
+ }
+
+ /* dal_mutex guards DAL calls not covered by DRM modeset locks */
+ mutex_lock(&adev->dm.dal_mutex);
+ dal_reset_path_mode(dm->dal, displays_number, displays_vector);
+ mutex_unlock(&adev->dm.dal_mutex);
+
+ return 0;
+}
+/* amd_ip_funcs table wiring the DM into the amdgpu IP-block lifecycle. */
+const struct amd_ip_funcs amdgpu_dm_funcs = {
+ .early_init = dm_early_init,
+ .late_init = NULL,
+ .sw_init = dm_sw_init,
+ .sw_fini = dm_sw_fini,
+ .hw_init = dm_hw_init,
+ .hw_fini = dm_hw_fini,
+ .suspend = dm_suspend,
+ .resume = dm_resume,
+ .is_idle = dm_is_idle,
+ .wait_for_idle = dm_wait_for_idle,
+ .soft_reset = dm_soft_reset,
+ .print_status = dm_print_status,
+ .set_clockgating_state = dm_set_clockgating_state,
+ .set_powergating_state = dm_set_powergating_state,
+};
+
+/* Register the DM's IRQ sources (vblank, page-flip, HPD) with the amdgpu
+ * IRQ core, attach the corresponding DAL high/low-context handlers, and
+ * set up the DRM mode_config limits and properties.
+ *
+ * Returns 0 on success, negative error code otherwise.
+ */
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+ int r;
+ int i;
+
+ /* Register IRQ sources and initialize high IRQ callbacks */
+ struct common_irq_params *c_irq_params;
+ struct dal_interrupt_params int_params = {0};
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.no_mutex_wait = false;
+ int_params.one_shot = false;
+
+ /* vblank interrupts: odd source ids 7..17, one per CRTC */
+ for (i = 7; i < 19; i += 2) {
+ r = amdgpu_irq_add_id(adev, i, &adev->crtc_irq);
+ /* Bail out before registering a DAL handler for a source
+ * that failed to register with the IRQ core (previously the
+ * handler was registered first and the error checked after). */
+ if (r)
+ return r;
+
+ /* High IRQ callback for crtc irq */
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dal_interrupt_to_irq_source(adev->dm.dal, i, 0);
+
+ c_irq_params = &adev->dm.vsync_params[int_params.irq_source - DAL_IRQ_SOURCE_CRTC1VSYNC];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ amdgpu_dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* page-flip interrupts: even source ids 8..18, one per CRTC */
+ for (i = 8; i < 20; i += 2) {
+ r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+ if (r)
+ return r;
+
+ /* High IRQ callback for pflip irq */
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dal_interrupt_to_irq_source(adev->dm.dal, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DAL_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ amdgpu_dm_pflip_high_irq, c_irq_params);
+ }
+
+ /* HPD hotplug */
+ r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ /* Low (deferred) context callback for hpd irq */
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source =
+ dal_interrupt_to_irq_source(adev->dm.dal, 42, i);
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ amdgpu_dm_hpd_low_irq, adev);
+ }
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev->ddev->mode_config.funcs = (void *)&amdgpu_mode_funcs;
+
+ adev->ddev->mode_config.max_width = 16384;
+ adev->ddev->mode_config.max_height = 16384;
+
+ adev->ddev->mode_config.preferred_depth = 24;
+ adev->ddev->mode_config.prefer_shadow = 1;
+
+ adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+
+ r = amdgpu_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ /* this is a part of HPD initialization */
+ drm_kms_helper_poll_init(adev->ddev);
+
+ return r;
+}
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+/* Backlight sysfs update hook: locate the first connected embedded panel
+ * and forward bd->props.brightness to DAL.
+ * Returns 0 on success, 1 on failure (the backlight core treats non-zero
+ * as an error). */
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ uint32_t current_display_index = 0;
+ uint32_t connected_displays_vector;
+ uint32_t total_supported_displays_vector;
+
+ /* DAL may have failed to initialize; nothing to program then */
+ if (!dm->dal)
+ return 0;
+
+ connected_displays_vector =
+ dal_get_connected_targets_vector(dm->dal);
+ total_supported_displays_vector =
+ dal_get_supported_displays_vector(dm->dal);
+
+ /* loops over all the connected displays*/
+ for (; total_supported_displays_vector != 0;
+ total_supported_displays_vector >>= 1,
+ connected_displays_vector >>= 1,
+ ++current_display_index) {
+ enum signal_type st;
+
+ if (!(connected_displays_vector & 1))
+ continue;
+
+ st = dal_get_display_signal(dm->dal, current_display_index);
+
+ /* stop at the first embedded panel - the one whose backlight
+ * we can control */
+ if (dal_is_embedded_signal(st))
+ break;
+ }
+
+ /* NOTE(review): if no embedded panel is connected, the loop falls
+ * through with current_display_index one past the last display -
+ * verify dal_set_backlight_level() rejects an invalid index. */
+ if (dal_set_backlight_level(
+ dm->dal,
+ current_display_index,
+ bd->props.brightness))
+ return 0;
+ else
+ return 1;
+}
+
+/* Backlight sysfs read hook: no readback from DAL; report the last value
+ * the core cached in props.brightness. */
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ return bd->props.brightness;
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+};
+#endif
+
+/* In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with DAL component
+ *
+ * Returns 0 on success
+ */
+int amdgpu_dm_initialize_drm_device(struct amdgpu_display_manager *dm)
+{
+ int current_display_index = 0;
+ struct amdgpu_connector *aconnector;
+ struct amdgpu_encoder *aencoder;
+ struct amdgpu_crtc *acrtc;
+
+ uint32_t connected_displays_vector =
+ dal_get_connected_targets_vector(dm->dal);
+ uint32_t total_supported_displays_vector =
+ dal_get_supported_displays_vector(dm->dal);
+
+
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("KMS: Failed to initialize mode config\n");
+ return -1;
+ }
+
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
+ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ {
+ struct backlight_device *bd;
+ char bl_name[16];
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(props));
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+ snprintf(bl_name, sizeof(bl_name),
+ "amdgpu_bl%d", dm->adev->ddev->primary->index);
+ bd = backlight_device_register(
+ bl_name,
+ dm->adev->ddev->dev,
+ dm,
+ &amdgpu_dm_backlight_ops,
+ &props);
+ /* backlight_device_register() reports failure via ERR_PTR(),
+ * never NULL, so the previous NULL check could never fire. */
+ if (IS_ERR(bd)) {
+ DRM_ERROR("Backlight registration failed\n");
+ goto fail_backlight_dev;
+ }
+ dm->backlight_dev = bd;
+ }
+#endif
+
+ /* loops over all the supported displays; bit N of the vectors
+ * corresponds to display index N */
+ for (; total_supported_displays_vector != 0;
+ total_supported_displays_vector >>= 1,
+ connected_displays_vector >>= 1,
+ ++current_display_index) {
+
+ if (current_display_index > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indices\n",
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ continue;
+ }
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ goto fail_connector;
+
+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+ if (!aencoder)
+ goto fail_encoder;
+
+ acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
+ if (!acrtc)
+ goto fail_crtc;
+
+ if (amdgpu_dm_crtc_init(
+ dm,
+ acrtc,
+ current_display_index)) {
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_encoder_init(
+ dm->ddev,
+ aencoder,
+ current_display_index,
+ acrtc)) {
+ DRM_ERROR("KMS: Failed to initialize encoder\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_connector_init(
+ dm,
+ aconnector,
+ current_display_index,
+ (connected_displays_vector & 1) == 1,
+ aencoder)) {
+ DRM_ERROR("KMS: Failed to initialize connector\n");
+ goto fail;
+ }
+ }
+
+ dm->display_indexes_num = current_display_index;
+ dm->mode_query_option = QUERY_OPTION_NO_PAN;
+
+ return 0;
+
+fail:
+ /* clean any dangling drm structure for the last (corrupted)
+ display target */
+ amdgpu_dm_crtc_destroy(&acrtc->base);
+fail_crtc:
+ amdgpu_dm_encoder_destroy(&aencoder->base);
+fail_encoder:
+ /* NOTE(review): on the !aencoder path this destroys a connector that
+ * was allocated but never initialized - verify
+ * amdgpu_dm_connector_destroy() copes with a zeroed connector. */
+ amdgpu_dm_connector_destroy(&aconnector->base);
+fail_connector:
+ backlight_device_unregister(dm->backlight_dev);
+fail_backlight_dev:
+ return -1;
+}
+
+/* Release every DRM mode object (crtc/encoder/connector, properties)
+ * that amdgpu_dm_initialize_drm_device() registered on this device. */
+void amdgpu_dm_destroy_drm_device(
+ struct amdgpu_display_manager *dm)
+{
+ drm_mode_config_cleanup(dm->ddev);
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+
+/* Enable or disable legacy VGA output: gates CPU access through the VGA
+ * aperture (VGA_MEMORY_DISABLE) and toggles the VGA render path
+ * (VGA_VSTATUS_CNTL) so the display controller owns scanout when
+ * render == false. */
+static void dm_set_vga_render_state(struct amdgpu_device *adev,
+ bool render)
+{
+ u32 tmp;
+
+ /* Lockout access through VGA aperture*/
+ tmp = RREG32(mmVGA_HDP_CONTROL);
+ if (render)
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
+ else
+ tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ WREG32(mmVGA_HDP_CONTROL, tmp);
+
+ /* disable VGA render */
+ tmp = RREG32(mmVGA_RENDER_CONTROL);
+ if (render)
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
+ else
+ tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
+ WREG32(mmVGA_RENDER_CONTROL, tmp);
+}
+
+/**
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ * Currently a stub: DAL handles bandwidth internally, so this only logs.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+ AMDGPU_DM_NOT_IMPL("%s\n", __func__);
+}
+
+/* Stub for the amdgpu display-funcs backlight setter; real control goes
+ * through the backlight class device (amdgpu_dm_backlight_ops). */
+static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
+ u8 level)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+ AMDGPU_DM_NOT_IMPL("%s\n", __func__);
+}
+
+/* Stub getter matching dm_set_backlight_level(); always reports 0. */
+static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
+{
+ /* TODO: translate amdgpu_encoder to display_index and call DAL */
+ AMDGPU_DM_NOT_IMPL("%s\n", __func__);
+ return 0;
+}
+
+/******************************************************************************
+ * Page Flip functions
+ ******************************************************************************/
+/**
+ * dm_page_flip - called by amdgpu_flip_work_func(), which is triggered
+ * via DRM IOCTL, by user mode.
+ *
+ * @adev: amdgpu_device pointer
+ * @crtc_id: crtc to cleanup pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (surface address update).
+ */
+static void dm_page_flip(struct amdgpu_device *adev,
+ int crtc_id, u64 crtc_base)
+{
+ struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ struct plane_addr_flip_info flip_info;
+ const unsigned int num_of_planes = 1;
+
+ memset(&flip_info, 0, sizeof(flip_info));
+
+ /* DAL identifies the target by display index, stored in crtc_id of
+ * the amdgpu_crtc (not the DRM crtc_id argument) */
+ flip_info.display_index = amdgpu_crtc->crtc_id;
+ flip_info.address_info.address.type = PLN_ADDR_TYPE_GRAPHICS;
+ flip_info.address_info.layer_index = LAYER_INDEX_PRIMARY;
+ flip_info.address_info.flags.bits.ENABLE = 1;
+
+ /* split the 64-bit MC address into the low/high register halves */
+ flip_info.address_info.address.grph.addr.low_part =
+ lower_32_bits(crtc_base);
+
+ flip_info.address_info.address.grph.addr.high_part =
+ upper_32_bits(crtc_base);
+
+ dal_update_plane_addresses(adev->dm.dal, num_of_planes, &flip_info);
+}
+
+/* amdgpu display-funcs table; entries left NULL are either handled by
+ * DAL (VBIOS parsing) or never called. */
+static const struct amdgpu_display_funcs display_funcs = {
+ .set_vga_render_state = dm_set_vga_render_state,
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+ .vblank_wait = NULL, /* not called anywhere */
+ .is_display_hung = dm_is_display_hung,/* called unconditionally */
+ .backlight_set_level =
+ dm_set_backlight_level,/* called unconditionally */
+ .backlight_get_level =
+ dm_get_backlight_level,/* called unconditionally */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = dm_hpd_get_gpio_reg,/* called unconditionally */
+ .page_flip = dm_page_flip, /* called unconditionally */
+ .page_flip_get_scanoutpos =
+ dm_crtc_get_scanoutpos,/* called unconditionally */
+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
+ .stop_mc_access = dm_stop_mc_access, /* called unconditionally */
+ .resume_mc_access = dm_resume_mc_access, /* called unconditionally */
+};
+
+/* Install the DM display-function table, unless one was already set. */
+static void set_display_funcs(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &display_funcs;
+}
+
+/* IP-block early_init hook: install the display and IRQ function tables
+ * and set the per-ASIC display resource counts.
+ * Returns 0 on success, -EINVAL for an unsupported ASIC. */
+static int dm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ set_display_funcs(adev);
+ amdgpu_dm_set_irq_funcs(adev);
+
+ switch (adev->asic_type) {
+ case CHIP_CARRIZO:
+ adev->mode_info.num_crtc = 3;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ break;
+ default:
+ /* fixed "Usupported" typo in the error message */
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ /* Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+ * amdgpu_device_init() */
+
+ return 0;
+}
+
+
+/* Placeholder for taking the DAL lock from sync-IRQ context; currently a
+ * no-op that always reports success. */
+bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm)
+{
+ /* TODO */
+ return true;
+}
+
+/* Placeholder counterpart to amdgpu_dm_acquire_dal_lock(); no-op. */
+bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm)
+{
+ /* TODO */
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h
new file mode 100644
index 000000000000..78950a25cee9
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_H__
+#define __AMDGPU_DM_H__
+
+/*
+#include "linux/switch.h"
+*/
+
+/*
+ * This file contains the definition for amdgpu_display_manager
+ * and its API for amdgpu driver's use.
+ * This component provides all the display related functionality
+ * and this is the only component that calls DAL API.
+ * The API contained here intended for amdgpu driver use.
+ * The API that is called directly from KMS framework is located
+ * in amdgpu_dm_kms.h file
+ */
+
+#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
+/*
+#include "include/amdgpu_dal_power_if.h"
+#include "amdgpu_dm_irq.h"
+*/
+
+#include "irq_types.h"
+
+/* Forward declarations */
+struct amdgpu_device;
+struct drm_device;
+struct amdgpu_dm_irq_handler_data;
+
+/* Cached framebuffer/position/mode from before the last modeset, used to
+ * restore state (see fake_display_index workaround below). */
+struct amdgpu_dm_prev_state {
+ struct drm_framebuffer *fb;
+ int32_t x;
+ int32_t y;
+ struct drm_display_mode mode;
+};
+
+/* Per-IRQ-source context handed to the high-IRQ handlers. */
+struct common_irq_params {
+ struct amdgpu_device *adev;
+ enum dal_irq_source irq_src;
+};
+
+/* Low-context handler list for one IRQ source, plus the work item used
+ * to run those handlers outside the ISR. */
+struct irq_list_head {
+ struct list_head head;
+ /* In case this interrupt needs post-processing, 'work' will be queued*/
+ struct work_struct work;
+};
+
+/* Per-device state of the Display Manager: the DAL instance, the DRM/amdgpu
+ * back-pointers, and all IRQ/timer bookkeeping. */
+struct amdgpu_display_manager {
+ struct dal *dal; /* DAL instance; NULL if dal_create() failed */
+ void *cgs_device; /* handle from amdgpu_cgs_create_device() */
+ /* lock to be used when DAL is called from SYNC IRQ context */
+ spinlock_t dal_lock;
+
+ struct amdgpu_device *adev; /*AMD base driver*/
+ struct drm_device *ddev; /*DRM base driver*/
+ u16 display_indexes_num;
+
+ u32 mode_query_option;
+
+ struct amdgpu_dm_prev_state prev_state;
+
+ /*
+ * 'irq_source_handler_table' holds a list of handlers
+ * per (DAL) IRQ source.
+ *
+ * Each IRQ source may need to be handled at different contexts.
+ * By 'context' we mean, for example:
+ * - The ISR context, which is the direct interrupt handler.
+ * - The 'deferred' context - this is the post-processing of the
+ * interrupt, but at a lower priority.
+ *
+ * Note that handlers are called in the same order as they were
+ * registered (FIFO).
+ */
+ struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+ struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
+
+ /* high-IRQ context params, one slot per page-flip source */
+ struct common_irq_params
+ pflip_params[DAL_IRQ_SOURCE_PFLIP_LAST - DAL_IRQ_SOURCE_PFLIP_FIRST + 1];
+
+ /* high-IRQ context params, one slot per CRTC vsync source */
+ struct common_irq_params
+ vsync_params[DAL_IRQ_SOURCE_CRTC6VSYNC - DAL_IRQ_SOURCE_CRTC1VSYNC + 1];
+
+ /* this spin lock synchronizes access to 'irq_handler_list_table' */
+ spinlock_t irq_handler_list_table_lock;
+
+ /* Timer-related data. */
+ struct list_head timer_handler_list;
+ struct workqueue_struct *timer_workqueue;
+
+ /*
+ * The problem:
+ * We don't get Set Mode call if only one display is connected, and
+ * this display is disconnected and connected back to the same
+ * connector.
+ *
+ * The workaround:
+ * 1. When the last display is disconnected, simulate a hot-plug for a
+ * fake display which has the same EDID as the one which was just
+ * disconnected, but with a mode list reduced to a single mode
+ * (the fail-safe mode) 640x480.
+ * Because of the change in mode-list we do get Set Mode.
+ * 2. When the real display is connected notify the OS about the
+ * new mode-list, which is different from the fake one, because
+ * of the difference the OS calls Set Mode again, which is exactly
+ * what we need. */
+ uint32_t fake_display_index;
+ /* Use dal_mutex for any activity which is NOT synchronized by
+ * DRM mode setting locks.
+ * For example: amdgpu_dm_hpd_low_irq() calls into DAL *without*
+ * DRM mode setting locks being acquired. This is where dal_mutex
+ * is acquired before calling into DAL. */
+ struct mutex dal_mutex;
+
+ struct backlight_device *backlight_dev;
+};
+
+
+/* basic init/fini API */
+int amdgpu_dm_init(struct amdgpu_device *adev);
+
+void amdgpu_dm_fini(struct amdgpu_device *adev);
+
+void amdgpu_dm_destroy(void);
+
+/* initializes drm_device display related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+int amdgpu_dm_initialize_drm_device(
+ struct amdgpu_display_manager *dm);
+
+/* removes and deallocates the drm structures, created by the above function */
+void amdgpu_dm_destroy_drm_device(
+ struct amdgpu_display_manager *dm);
+
+/* Locking/Mutex */
+bool amdgpu_dm_acquire_dal_lock(struct amdgpu_display_manager *dm);
+
+bool amdgpu_dm_release_dal_lock(struct amdgpu_display_manager *dm);
+
+extern const struct amd_ip_funcs amdgpu_dm_funcs;
+
+#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c
new file mode 100644
index 000000000000..3d5ca5a0cff6
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.c
@@ -0,0 +1,731 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drmP.h>
+
+#include "dal_services_types.h"
+#include "include/dal_interface.h"
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "include/dal_interface.h"
+
+
+/******************************************************************************
+ * Private declarations.
+ *****************************************************************************/
+
+/* Fields shared by IRQ and timer handler records: list linkage, the
+ * callback and its argument, and the owning DM. */
+struct handler_common_data {
+ struct list_head list;
+ interrupt_handler handler;
+ void *handler_arg;
+
+ /* DM which this handler belongs to */
+ struct amdgpu_display_manager *dm;
+};
+
+/* A registered IRQ handler; lives on one of the per-source lists. */
+struct amdgpu_dm_irq_handler_data {
+ struct handler_common_data hcd;
+ /* DAL irq source which registered for this interrupt. */
+ enum dal_irq_source irq_source;
+};
+
+/* A registered single-shot timer handler, run via delayed work. */
+struct amdgpu_dm_timer_handler_data {
+ struct handler_common_data hcd;
+ struct delayed_work d_work;
+};
+
+
+#define DM_IRQ_TABLE_LOCK(adev, flags) \
+ spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags)
+
+#define DM_IRQ_TABLE_UNLOCK(adev, flags) \
+ spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags)
+
+/******************************************************************************
+ * Private functions.
+ *****************************************************************************/
+
+/* Populate the fields shared by IRQ and timer handler records. */
+static void init_handler_common_data(
+ struct handler_common_data *hcd,
+ void (*ih)(void *),
+ void *args,
+ struct amdgpu_display_manager *dm)
+{
+ hcd->dm = dm;
+ hcd->handler = ih;
+ hcd->handler_arg = args;
+}
+
+/**
+ * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ *
+ * @work: work struct
+ *
+ * Runs every low-context (deferred) handler registered for the IRQ source
+ * whose irq_list_head embeds @work.
+ */
+static void dm_irq_work_func(struct work_struct *work)
+{
+ struct list_head *entry;
+ struct irq_list_head *irq_list_head =
+ container_of(work, struct irq_list_head, work);
+ struct list_head *handler_list = &irq_list_head->head;
+ struct amdgpu_dm_irq_handler_data *handler_data;
+
+ /* NOTE(review): the list is walked without taking
+ * irq_handler_list_table_lock - verify unregistration cannot race
+ * with this work item. */
+ list_for_each(entry, handler_list) {
+ handler_data =
+ list_entry(
+ entry,
+ struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
+ handler_data->irq_source);
+
+ /* NOTE(review): this second message looks like a copy/paste
+ * leftover - nothing is scheduled here. */
+ DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
+ handler_data->irq_source);
+
+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
+ }
+
+ /* Call a DAL subcomponent which registered for interrupt notification
+ * at INTERRUPT_LOW_IRQ_CONTEXT.
+ * (The most common use is HPD interrupt) */
+}
+
+/**
+ * Remove a handler and return a pointer to the handler list from which the
+ * handler was removed. Returns NULL if @ih was not found on the list for
+ * the source/context in @int_params (not necessarily an error - the caller
+ * probes every context).
+ */
+static struct list_head *remove_irq_handler(
+ struct amdgpu_device *adev,
+ void *ih,
+ const struct dal_interrupt_params *int_params)
+{
+ struct list_head *hnd_list;
+ struct list_head *entry, *tmp;
+ struct amdgpu_dm_irq_handler_data *handler;
+ unsigned long irq_table_flags;
+ bool handler_removed = false;
+ enum dal_irq_source irq_source;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ irq_source = int_params->irq_source;
+
+ /* select the per-source list for the requested context */
+ switch (int_params->int_context) {
+ case INTERRUPT_HIGH_IRQ_CONTEXT:
+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+ break;
+ case INTERRUPT_LOW_IRQ_CONTEXT:
+ default:
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+ break;
+ }
+
+ list_for_each_safe(entry, tmp, hnd_list) {
+
+ handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+ hcd.list);
+
+ /* @ih is the opaque handle returned by registration, i.e. the
+ * handler_data pointer itself */
+ if (ih == handler) {
+ /* Found our handler. Remove it from the list. */
+ list_del(&handler->hcd.list);
+ handler_removed = true;
+ break;
+ }
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (handler_removed == false) {
+ /* Not necessarily an error - caller may not
+ * know the context. */
+ return NULL;
+ }
+
+ /* freed outside the spinlock on purpose */
+ kfree(handler);
+
+ DRM_DEBUG_KMS(
+ "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n",
+ ih, int_params->irq_source, int_params->int_context);
+
+ return hnd_list;
+}
+
+/* If 'handler_in == NULL' then remove ALL handlers. */
+static void remove_timer_handler(
+ struct amdgpu_device *adev,
+ struct amdgpu_dm_timer_handler_data *handler_in)
+{
+ struct amdgpu_dm_timer_handler_data *handler_temp;
+ struct list_head *handler_list;
+ struct list_head *entry, *tmp;
+ unsigned long irq_table_flags;
+ bool handler_removed = false;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ handler_list = &adev->dm.timer_handler_list;
+
+ list_for_each_safe(entry, tmp, handler_list) {
+ /* Note that list_for_each_safe() guarantees that
+ * handler_temp is NOT null. */
+ handler_temp = list_entry(entry,
+ struct amdgpu_dm_timer_handler_data, hcd.list);
+
+ if (handler_in == NULL || handler_in == handler_temp) {
+ list_del(&handler_temp->hcd.list);
+ /* The lock is dropped here because
+ * cancel_delayed_work_sync() may sleep.
+ * NOTE(review): 'tmp' may be stale after the lock is
+ * re-taken if the list changed meanwhile - verify no
+ * concurrent registration can run during fini. */
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ DRM_DEBUG_KMS("DM_IRQ: removing timer handler: %p\n",
+ handler_temp);
+
+ if (handler_in == NULL) {
+ /* Since it is still in the queue, it must
+ * be cancelled. */
+ cancel_delayed_work_sync(&handler_temp->d_work);
+ }
+
+ kfree(handler_temp);
+ handler_removed = true;
+
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+ }
+
+ if (handler_in == NULL) {
+ /* Remove ALL handlers. */
+ continue;
+ }
+
+ if (handler_in == handler_temp) {
+ /* Remove a SPECIFIC handler.
+ * Found our handler - we can stop here. */
+ break;
+ }
+ }
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (handler_in != NULL && handler_removed == false) {
+ DRM_ERROR("DM_IRQ: handler: %p is not in the list!\n",
+ handler_in);
+ }
+}
+
+/**
+ * dm_timer_work_func - Handle a timer.
+ *
+ * @work: work struct
+ *
+ * Runs the registered callback once, then removes (and frees) its own
+ * handler record - DM timers are single-shot.
+ */
+static void dm_timer_work_func(
+ struct work_struct *work)
+{
+ struct amdgpu_dm_timer_handler_data *handler_data =
+ container_of(work, struct amdgpu_dm_timer_handler_data,
+ d_work.work);
+
+ DRM_DEBUG_KMS("DM_IRQ: work_func: handler_data=%p\n", handler_data);
+
+ /* Call a DAL subcomponent which registered for timer notification. */
+ handler_data->hcd.handler(handler_data->hcd.handler_arg);
+
+ /* We support only "single shot" timers. That means we must delete
+ * the handler after it was called. */
+ remove_timer_handler(handler_data->hcd.dm->adev, handler_data);
+}
+
+/******************************************************************************
+ * Public functions.
+ *
+ * Note: caller is responsible for input validation.
+ *****************************************************************************/
+
+/* Allocate and register a DAL interrupt handler for the given source and
+ * context (high = direct ISR, low = deferred work item).
+ *
+ * Returns an opaque handle to pass to amdgpu_dm_irq_unregister_interrupt(),
+ * or DAL_INVALID_IRQ_HANDLER_IDX on allocation failure.
+ */
+void *amdgpu_dm_irq_register_interrupt(
+ struct amdgpu_device *adev,
+ struct dal_interrupt_params *int_params,
+ void (*ih)(void *),
+ void *handler_args)
+{
+ struct list_head *hnd_list;
+ struct amdgpu_dm_irq_handler_data *handler_data;
+ unsigned long irq_table_flags;
+ enum dal_irq_source irq_source;
+
+ /* kzalloc() returns zeroed memory; the memset() that used to follow
+ * was redundant and has been dropped. */
+ handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+ if (!handler_data) {
+ DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
+ return DAL_INVALID_IRQ_HANDLER_IDX;
+ }
+
+ init_handler_common_data(&handler_data->hcd, ih, handler_args,
+ &adev->dm);
+
+ irq_source = int_params->irq_source;
+
+ handler_data->irq_source = irq_source;
+
+ /* Lock the list, add the handler. */
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ switch (int_params->int_context) {
+ case INTERRUPT_HIGH_IRQ_CONTEXT:
+ hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source];
+ break;
+ case INTERRUPT_LOW_IRQ_CONTEXT:
+ default:
+ hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+ break;
+ }
+
+ list_add_tail(&handler_data->hcd.list, hnd_list);
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ /* This pointer will be stored by code which requested interrupt
+ * registration.
+ * The same pointer will be needed in order to unregister the
+ * interrupt. */
+
+ DRM_DEBUG_KMS(
+ "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
+ handler_data,
+ irq_source,
+ int_params->int_context);
+
+ return handler_data;
+}
+
+/* Remove a handler previously returned by
+ * amdgpu_dm_irq_register_interrupt(). The caller may not know which
+ * context the handler was registered under, so every context is probed
+ * until the handler is found. */
+void amdgpu_dm_irq_unregister_interrupt(
+ struct amdgpu_device *adev,
+ enum dal_irq_source irq_source,
+ void *ih)
+{
+ /* NULL-initialized so the "not found" check below is well-defined
+ * even if the context loop assigns nothing. */
+ struct list_head *handler_list = NULL;
+ struct dal_interrupt_params int_params;
+ int i;
+
+ memset(&int_params, 0, sizeof(int_params));
+
+ int_params.irq_source = irq_source;
+
+ for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) {
+
+ int_params.int_context = i;
+
+ handler_list = remove_irq_handler(adev, ih, &int_params);
+
+ if (handler_list != NULL)
+ break;
+ }
+
+ if (handler_list == NULL) {
+ /* If we got here, it means we searched all irq contexts
+ * for this irq source, but the handler was not found. */
+ DRM_ERROR(
+ "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
+ ih, irq_source);
+ }
+}
+
+/* Initialize all DM IRQ bookkeeping: the per-source high/low handler
+ * lists, their deferred-work items, and the single-threaded workqueue
+ * used for DM timers.
+ * Returns 0 on success, -1 if the workqueue cannot be created
+ * (NOTE(review): kernel convention would be -ENOMEM; callers only test
+ * for non-zero). */
+int amdgpu_dm_irq_init(
+ struct amdgpu_device *adev)
+{
+ int src;
+ struct irq_list_head *lh;
+
+ DRM_DEBUG_KMS("DM_IRQ\n");
+
+ spin_lock_init(&adev->dm.irq_handler_list_table_lock);
+
+ for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+ /* low context handler list init */
+ lh = &adev->dm.irq_handler_list_low_tab[src];
+ INIT_LIST_HEAD(&lh->head);
+ INIT_WORK(&lh->work, dm_irq_work_func);
+
+ /* high context handler init */
+ INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
+ }
+
+ INIT_LIST_HEAD(&adev->dm.timer_handler_list);
+
+ /* allocate and initialize the workqueue for DM timer */
+ adev->dm.timer_workqueue = create_singlethread_workqueue(
+ "dm_timer_queue");
+ if (adev->dm.timer_workqueue == NULL) {
+ DRM_ERROR("DM_IRQ: unable to create timer queue!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Register a single-shot DM timer: after micro_sec_interval microseconds
+ * the DM timer workqueue runs @ih(@args) once and the handler record is
+ * removed by dm_timer_work_func(). Allocation failure is logged and the
+ * timer silently not armed. */
+void amdgpu_dm_irq_register_timer(
+ struct amdgpu_device *adev,
+ struct dal_timer_interrupt_params *int_params,
+ interrupt_handler ih,
+ void *args)
+{
+ unsigned long jf_delay;
+ struct list_head *handler_list;
+ struct amdgpu_dm_timer_handler_data *handler_data;
+ unsigned long irq_table_flags;
+
+ /* kzalloc() returns zeroed memory; the memset() that used to follow
+ * was redundant and has been dropped. */
+ handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+ if (!handler_data) {
+ DRM_ERROR("DM_IRQ: failed to allocate timer handler!\n");
+ return;
+ }
+
+ init_handler_common_data(&handler_data->hcd, ih, args, &adev->dm);
+
+ INIT_DELAYED_WORK(&handler_data->d_work, dm_timer_work_func);
+
+ /* Lock the list, add the handler. */
+ DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+ handler_list = &adev->dm.timer_handler_list;
+
+ list_add_tail(&handler_data->hcd.list, handler_list);
+
+ DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ jf_delay = usecs_to_jiffies(int_params->micro_sec_interval);
+
+ queue_delayed_work(adev->dm.timer_workqueue, &handler_data->d_work,
+ jf_delay);
+
+ DRM_DEBUG_KMS("DM_IRQ: added handler:%p with micro_sec_interval=%llu\n",
+ handler_data, int_params->micro_sec_interval);
+}
+
+/* DM IRQ and timer resource release */
+/* Counterpart of amdgpu_dm_irq_init(): flushes pending low-context work,
+ * cancels all DM timers, and destroys the timer workqueue. */
+void amdgpu_dm_irq_fini(
+	struct amdgpu_device *adev)
+{
+	int src;
+	struct irq_list_head *lh;
+	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
+
+	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
+
+		/* The handler was removed from the table,
+		 * it means it is safe to flush all the 'work'
+		 * (because no code can schedule a new one). */
+		lh = &adev->dm.irq_handler_list_low_tab[src];
+		flush_work(&lh->work);
+	}
+
+	/* Cancel ALL timers and release handlers (if any). */
+	remove_timer_handler(adev, NULL);
+	/* Release the queue itself. */
+	destroy_workqueue(adev->dm.timer_workqueue);
+}
+
+/* Disable the HPD interrupt sources for suspend and drain any deferred
+ * work that is still pending for them.
+ *
+ * Always returns 0.
+ */
+int amdgpu_dm_irq_suspend(
+	struct amdgpu_device *adev)
+{
+	int src;
+	struct list_head *hnd_list_h;
+	struct list_head *hnd_list_l;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: suspend\n");
+
+	/* disable HW interrupt */
+	for (src = DAL_IRQ_SOURCE_HPD1; src <= DAL_IRQ_SOURCE_HPD6; src++) {
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+			dal_interrupt_set(adev->dm.dal, src, false);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	/* flush_work() may sleep, so it must not be called while the
+	 * irq table spinlock is held; flush the low-context work items
+	 * only after the lock has been dropped. */
+	for (src = DAL_IRQ_SOURCE_HPD1; src <= DAL_IRQ_SOURCE_HPD6; src++)
+		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
+
+	return 0;
+}
+
+/* Re-enable, on resume, every HPD interrupt source that still has at
+ * least one registered handler (in either context).
+ *
+ * Always returns 0.
+ */
+int amdgpu_dm_irq_resume(
+	struct amdgpu_device *adev)
+{
+	unsigned long flags;
+	struct list_head *low_list;
+	struct list_head *high_list;
+	int src;
+
+	DM_IRQ_TABLE_LOCK(adev, flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: resume\n");
+
+	/* re-enable HW interrupt */
+	for (src = DAL_IRQ_SOURCE_HPD1; src <= DAL_IRQ_SOURCE_HPD6; src++) {
+		low_list = &adev->dm.irq_handler_list_low_tab[src].head;
+		high_list = &adev->dm.irq_handler_list_high_tab[src];
+
+		if (list_empty(low_list) && list_empty(high_list))
+			continue;
+
+		dal_interrupt_set(adev->dm.dal, src, true);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, flags);
+
+	return 0;
+}
+
+
+/**
+ * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
+ * "irq_source".
+ */
+/**
+ * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
+ * "irq_source".
+ *
+ * The work item walks and invokes the low-context handler list for this
+ * source (see dm_irq_work_func); scheduling is done under the table lock.
+ */
+static void amdgpu_dm_irq_schedule_work(
+	struct amdgpu_device *adev,
+	enum dal_irq_source irq_source)
+{
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	/* Since the caller is interested in 'work_struct' then
+	 * the irq will be post-processed at "INTERRUPT_LOW_IRQ_CONTEXT". */
+
+	schedule_work(&adev->dm.irq_handler_list_low_tab[irq_source].work);
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
+/** amdgpu_dm_irq_immediate_work
+ * Callback high irq work immediately, don't send to work queue
+ *
+ * NOTE(review): the handlers run with the irq table spinlock held, so
+ * high-context handlers must be non-blocking.
+ */
+static void amdgpu_dm_irq_immediate_work(
+	struct amdgpu_device *adev,
+	enum dal_irq_source irq_source)
+{
+	struct amdgpu_dm_irq_handler_data *handler_data;
+	struct list_head *entry;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	list_for_each(
+		entry,
+		&adev->dm.irq_handler_list_high_tab[irq_source]) {
+
+		handler_data =
+			list_entry(
+				entry,
+				struct amdgpu_dm_irq_handler_data,
+				hcd.list);
+
+		/* Call a subcomponent which registered for immediate
+		 * interrupt notification */
+		handler_data->hcd.handler(handler_data->hcd.handler_arg);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
+/*
+ * amdgpu_dm_irq_handler
+ *
+ * Generic IRQ handler, calls all registered high irq work immediately, and
+ * schedules work for low irq
+ */
+/*
+ * amdgpu_dm_irq_handler
+ *
+ * Generic IRQ handler: maps the IV entry to a DAL source, acks it,
+ * runs the high-context handlers inline and defers the low-context
+ * handlers to the work queue.  Always returns 0.
+ */
+int amdgpu_dm_irq_handler(
+	struct amdgpu_device *adev,
+	struct amdgpu_irq_src *source,
+	struct amdgpu_iv_entry *entry)
+{
+	enum dal_irq_source irq_src;
+
+	irq_src = dal_interrupt_to_irq_source(adev->dm.dal,
+					entry->src_id,
+					entry->src_data);
+
+	dal_interrupt_ack(adev->dm.dal, irq_src);
+
+	/* High-context handlers run right here, in interrupt context. */
+	amdgpu_dm_irq_immediate_work(adev, irq_src);
+
+	/* Low-context handlers are deferred to the work queue. */
+	amdgpu_dm_irq_schedule_work(adev, irq_src);
+
+	return 0;
+}
+
+/* Translate an AMDGPU_HPD_* pin id to its DAL interrupt source, or
+ * DAL_IRQ_SOURCE_INVALID for anything unrecognized. */
+static enum dal_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+{
+	enum dal_irq_source src = DAL_IRQ_SOURCE_INVALID;
+
+	switch (type) {
+	case AMDGPU_HPD_1:
+		src = DAL_IRQ_SOURCE_HPD1;
+		break;
+	case AMDGPU_HPD_2:
+		src = DAL_IRQ_SOURCE_HPD2;
+		break;
+	case AMDGPU_HPD_3:
+		src = DAL_IRQ_SOURCE_HPD3;
+		break;
+	case AMDGPU_HPD_4:
+		src = DAL_IRQ_SOURCE_HPD4;
+		break;
+	case AMDGPU_HPD_5:
+		src = DAL_IRQ_SOURCE_HPD5;
+		break;
+	case AMDGPU_HPD_6:
+		src = DAL_IRQ_SOURCE_HPD6;
+		break;
+	default:
+		break;
+	}
+
+	return src;
+}
+
+/* amdgpu_irq_src_funcs.set callback for HPD: enable/disable the DAL
+ * interrupt source matching the HPD pin.  Always returns 0. */
+static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	dal_interrupt_set(adev->dm.dal,
+			amdgpu_dm_hpd_to_dal_irq_source(type),
+			state == AMDGPU_IRQ_STATE_ENABLE);
+	return 0;
+}
+
+/* amdgpu_irq_src_funcs.set callback for page-flip interrupts.
+ * Always returns 0. */
+static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	/* 'type' is the display_index (passed via work->crtc_id);
+	 * only the primary plane (plane 0) is covered here. */
+	enum dal_irq_source irq_src =
+		dal_get_pflip_irq_src_from_display_index(
+			adev->dm.dal, type, 0 /* plane_no */);
+
+	dal_interrupt_set(adev->dm.dal, irq_src,
+			state == AMDGPU_IRQ_STATE_ENABLE);
+	return 0;
+}
+
+/* amdgpu_irq_src_funcs.set callback for CRTC (vblank) interrupts.
+ * Always returns 0. */
+static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
+					struct amdgpu_irq_src *source,
+					unsigned type,
+					enum amdgpu_interrupt_state state)
+{
+	/* 'type' is the display index. */
+	enum dal_irq_source irq_src =
+		dal_get_vblank_irq_src_from_display_index(adev->dm.dal, type);
+
+	dal_interrupt_set(adev->dm.dal, irq_src,
+			state == AMDGPU_IRQ_STATE_ENABLE);
+	return 0;
+}
+
+/* All three sources share the generic DM process handler; only the
+ * .set callback differs per interrupt kind. */
+static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
+	.set = amdgpu_dm_set_crtc_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
+	.set = amdgpu_dm_set_pflip_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
+static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {
+	.set = amdgpu_dm_set_hpd_irq_state,
+	.process = amdgpu_dm_irq_handler,
+};
+
+/* Hook the DM irq callback tables into the device's CRTC, page-flip and
+ * HPD interrupt sources. */
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
+{
+	adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+
+	adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+	adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
+
+	adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+	adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
+}
+
+/*
+ * amdgpu_dm_hpd_init - hpd setup callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+
+	/* Walk every connector and enable its HPD interrupt source,
+	 * taking an irq reference so the source stays armed. */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *amdgpu_connector =
+				to_amdgpu_connector(connector);
+		enum dal_irq_source src =
+				amdgpu_dm_hpd_to_dal_irq_source(
+					amdgpu_connector->hpd.hpd);
+
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+			/* don't try to enable hpd on eDP or LVDS avoid breaking
+			 * the aux dp channel on imac and help (but not
+			 * completely fix)
+			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+			 * also avoid interrupt storms during dpms.
+			 */
+			continue;
+		}
+
+		dal_interrupt_set(adev->dm.dal, src, true);
+		amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
+	}
+}
+
+/**
+ * amdgpu_dm_hpd_fini - hpd tear down callback.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev->ddev;
+	struct drm_connector *connector;
+
+	/* Disable each connector's HPD source and drop the irq reference
+	 * taken in amdgpu_dm_hpd_init(). */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct amdgpu_connector *aconnector =
+				to_amdgpu_connector(connector);
+
+		dal_interrupt_set(adev->dm.dal,
+				amdgpu_dm_hpd_to_dal_irq_source(
+					aconnector->hpd.hpd),
+				false);
+		amdgpu_irq_put(adev, &adev->hpd_irq, aconnector->hpd.hpd);
+	}
+}
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h
new file mode 100644
index 000000000000..1f3a956e0aec
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_irq.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_H__
+#define __AMDGPU_DM_IRQ_H__
+
+#include "include/irq_types.h" /* DAL irq definitions */
+
+/*
+ * Display Manager IRQ-related interfaces (for use by DAL).
+ */
+
+/**
+ * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM initialization.
+ *
+ * Returns:
+ * 0 - success
+ * non-zero - error
+ */
+int amdgpu_dm_irq_init(
+ struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM destruction.
+ *
+ */
+void amdgpu_dm_irq_fini(
+ struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
+ *
+ * @adev: AMD DRM device
+ * @int_params: parameters for the irq
+ * @ih: pointer to the irq handler function
+ * @handler_args: arguments which will be passed to ih
+ *
+ * Returns:
+ * IRQ Handler Index on success.
+ * NULL on failure.
+ *
+ * Cannot be called from an interrupt handler.
+ */
+void *amdgpu_dm_irq_register_interrupt(
+ struct amdgpu_device *adev,
+ struct dal_interrupt_params *int_params,
+ void (*ih)(void *),
+ void *handler_args);
+
+/**
+ * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
+ * by amdgpu_dm_irq_register_interrupt().
+ *
+ * @adev: AMD DRM device.
+ * @ih_index: irq handler index which was returned by
+ * amdgpu_dm_irq_register_interrupt
+ */
+void amdgpu_dm_irq_unregister_interrupt(
+ struct amdgpu_device *adev,
+ enum dal_irq_source irq_source,
+ void *ih_index);
+
+void amdgpu_dm_irq_register_timer(
+ struct amdgpu_device *adev,
+ struct dal_timer_interrupt_params *int_params,
+ interrupt_handler ih,
+ void *args);
+
+/**
+ * amdgpu_dm_irq_handler
+ * Generic IRQ handler, calls all registered high irq work immediately, and
+ * schedules work for low irq
+ */
+int amdgpu_dm_irq_handler(
+ struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+
+void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev);
+
+void amdgpu_dm_hpd_init(struct amdgpu_device *adev);
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend.
+ *
+ */
+int amdgpu_dm_irq_suspend(
+ struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_resume - enable ASIC interrupt during resume.
+ *
+ */
+int amdgpu_dm_irq_resume(
+ struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_DM_IRQ_H__ */
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c
new file mode 100644
index 000000000000..c689fb8468f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.c
@@ -0,0 +1,1575 @@
+/*
+ * Copyright 2012-13 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services_types.h"
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+// We need to #undef FRAME_SIZE and DEPRECATED because they conflict
+// with ptrace-abi.h's #define's of them.
+#undef FRAME_SIZE
+#undef DEPRECATED
+
+#include "amdgpu_dm_types.h"
+
+#include "include/dal_interface.h"
+#include "include/timing_service_types.h"
+#include "include/set_mode_interface.h"
+#include "include/mode_query_interface.h"
+#include "include/dcs_types.h"
+#include "include/mode_manager_interface.h"
+#include "include/mode_manager_types.h"
+
+/*#include "amdgpu_buffer.h"*/
+
+#include "dce/dce_11_0_d.h"
+#include "dce/dce_11_0_sh_mask.h"
+#include "dce/dce_11_0_enum.h"
+
+/* drm_encoder_funcs.destroy callback: detach the encoder from DRM and
+ * free its embedding allocation. */
+void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+	kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+	.reset = NULL, /* no encoder state to reset */
+	.destroy = amdgpu_dm_encoder_destroy,
+};
+
+/* Build a single-path DAL topology containing only the given display. */
+static void init_dal_topology(
+	struct amdgpu_display_manager *dm,
+	struct topology *tp,
+	uint32_t current_display_index)
+{
+	DRM_DEBUG_KMS("current_display_index: %d\n", current_display_index);
+
+	tp->disp_path_num = 1;
+	tp->display_index[0] = current_display_index;
+}
+
+
+/* Map a DRM fourcc pixel format to the DAL pixel format enum.
+ * Unknown formats fall back to ARGB8888.
+ *
+ * NOTE(review): DRM_FORMAT_RGB888 mapping to PIXEL_FORMAT_INDEX8 looks
+ * suspicious (24bpp RGB vs. 8-bit palette) — confirm against the DAL
+ * pixel_format definitions. */
+static enum pixel_format convert_to_dal_pixel_format(uint32_t drm_pf)
+{
+	switch (drm_pf) {
+	case DRM_FORMAT_RGB888:
+		return PIXEL_FORMAT_INDEX8;
+	case DRM_FORMAT_RGB565:
+		return PIXEL_FORMAT_RGB565;
+	case DRM_FORMAT_ARGB8888:
+		return PIXEL_FORMAT_ARGB8888;
+	case DRM_FORMAT_ARGB2101010:
+		return PIXEL_FORMAT_ARGB2101010;
+	default:
+		return PIXEL_FORMAT_ARGB8888;
+	}
+}
+
+/* Pin the cursor BO in VRAM and program its attributes through DAL.
+ *
+ * Returns 0 on success, -EINVAL for an oversized cursor, or the error
+ * from the BO reserve/pin path.  On success the BO stays pinned; the
+ * caller owns unpinning (see dm_crtc_cursor_set). */
+static int dm_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
+			 uint32_t width, uint32_t height)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_bo *robj;
+	struct cursor_attributes attributes;
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	uint64_t gpu_addr;
+	int ret;
+
+	if ((width > amdgpu_crtc->max_cursor_width) ||
+	    (height > amdgpu_crtc->max_cursor_height)) {
+		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
+		return -EINVAL;
+	}
+
+	robj = gem_to_amdgpu_bo(obj);
+	ret = amdgpu_bo_reserve(robj, false);
+	if (unlikely(ret != 0))
+		return ret;
+	ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM,
+				       0, 0, &gpu_addr);
+	amdgpu_bo_unreserve(robj);
+	if (ret)
+		return ret;
+
+	amdgpu_crtc->cursor_width = width;
+	amdgpu_crtc->cursor_height = height;
+
+	attributes.address.high_part = upper_32_bits(gpu_addr);
+	attributes.address.low_part  = lower_32_bits(gpu_addr);
+	/* hardware takes width/height as (size - 1) */
+	attributes.width = width-1;
+	attributes.height = height-1;
+	attributes.x_hot = 0;
+	attributes.y_hot = 0;
+	attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
+	attributes.rotation_angle = 0;
+	attributes.attribute_flags.value = 0;
+
+	dal_set_cursor_attributes(adev->dm.dal, amdgpu_crtc->crtc_id, &attributes);
+
+	return 0;
+}
+
+
+/* drm cursor_set hook: install a new cursor BO (or hide the cursor when
+ * handle == 0), then unpin and release the previously installed BO.
+ *
+ * Returns 0 on success, -ENOENT if the handle does not resolve, or the
+ * error from dm_set_cursor(). */
+static int dm_crtc_cursor_set(struct drm_crtc *crtc,
+			      struct drm_file *file_priv,
+			      uint32_t handle,
+			      uint32_t width,
+			      uint32_t height)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *robj;
+
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	int ret;
+	struct cursor_position position;
+
+	if (!handle) {
+		/* turn off cursor */
+		position.enable = false;
+		position.x = 0;
+		position.y = 0;
+		position.hot_spot_enable = false;
+		dal_set_cursor_position(adev->dm.dal, amdgpu_crtc->crtc_id, &position);
+
+		/* obj == NULL ends up stored as the (absent) cursor BO */
+		obj = NULL;
+		goto unpin;
+	}
+
+	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
+		return -ENOENT;
+	}
+
+	ret = dm_set_cursor(crtc, obj, width, height);
+
+	if (ret) {
+		/* new BO was never pinned; just drop the lookup reference */
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
+unpin:
+	/* release the previous cursor BO, if any */
+	if (amdgpu_crtc->cursor_bo) {
+		robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+		ret = amdgpu_bo_reserve(robj, false);
+		if (likely(ret == 0)) {
+			amdgpu_bo_unpin(robj);
+			amdgpu_bo_unreserve(robj);
+		}
+		drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+	}
+
+	amdgpu_crtc->cursor_bo = obj;
+	return 0;
+
+}
+
+/* drm cursor_move hook: reposition the cursor, clamping negative
+ * coordinates by shifting the cursor hot-spot origin instead.
+ * Always returns 0. */
+static int dm_crtc_cursor_move(struct drm_crtc *crtc,
+			       int x, int y)
+{
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	int xorigin = 0, yorigin = 0;
+	struct cursor_position position;
+
+	/* avivo cursor are offset into the total surface */
+	x += crtc->x;
+	y += crtc->y;
+	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+	if (x < 0) {
+		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
+		x = 0;
+	}
+	if (y < 0) {
+		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
+		y = 0;
+	}
+
+	position.enable = true;
+	position.x = x;
+	position.y = y;
+
+	position.hot_spot_enable = true;
+	position.x_origin = xorigin;
+	position.y_origin = yorigin;
+
+	dal_set_cursor_position(adev->dm.dal, amdgpu_crtc->crtc_id, &position);
+
+	return 0;
+}
+
+/* Re-program the previously installed cursor BO (e.g. after a mode set).
+ * No-op returning 0 when no cursor BO is present. */
+static int dm_crtc_cursor_reset(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	if (!acrtc->cursor_bo)
+		return 0;
+
+	return dm_set_cursor(crtc, acrtc->cursor_bo,
+			acrtc->cursor_width, acrtc->cursor_height);
+}
+
+/* Populate a DAL plane_config from the CRTC's primary framebuffer:
+ * pixel format, tiling, surface size/pitch and (for now, hard-coded
+ * full-screen) src/dst/clip rectangles and viewport.
+ * On reserve failure or an unsupported format the config is left
+ * partially filled and the function returns early. */
+static void fill_plane_attributes(
+	struct amdgpu_device* adev,
+	struct plane_config *pl_config,
+	struct drm_crtc *crtc) {
+
+	uint64_t tiling_flags;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *rbo;
+	int r;
+	struct amdgpu_framebuffer *amdgpu_fb;
+
+	amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+	obj = amdgpu_fb->obj;
+	rbo = gem_to_amdgpu_bo(obj);
+	r = amdgpu_bo_reserve(rbo, false);
+	if (unlikely(r != 0)){
+		DRM_ERROR("Unable to reserve buffer\n");
+		return;
+	}
+
+	/* tiling flags are read under the BO reservation */
+	amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+	amdgpu_bo_unreserve(rbo);
+
+	switch (amdgpu_fb->base.pixel_format) {
+	case DRM_FORMAT_C8:
+		pl_config->config.format =
+				SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+		break;
+	case DRM_FORMAT_RGB565:
+		pl_config->config.format =
+				SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+		break;
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_ARGB8888:
+		pl_config->config.format =
+				SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+		break;
+	default:
+		DRM_ERROR("Unsupported screen depth %d\n",
+				amdgpu_fb->base.bits_per_pixel);
+		return;
+	}
+
+	pl_config->config.tiling_info.value = 0;
+
+	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
+		unsigned bankw, bankh, mtaspect, tile_split, num_banks;
+
+		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
+		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
+		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
+		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
+		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
+
+
+		/* XXX fix me for VI */
+		pl_config->config.tiling_info.grph.NUM_BANKS = num_banks;
+		pl_config->config.tiling_info.grph.ARRAY_MODE = ARRAY_2D_TILED_THIN1;
+		pl_config->config.tiling_info.grph.TILE_SPLIT = tile_split;
+		pl_config->config.tiling_info.grph.BANK_WIDTH = bankw;
+		pl_config->config.tiling_info.grph.BANK_HEIGHT = bankh;
+		pl_config->config.tiling_info.grph.TILE_ASPECT = mtaspect;
+		pl_config->config.tiling_info.grph.TILE_MODE = ADDR_SURF_MICRO_TILING_DISPLAY;
+	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
+		pl_config->config.tiling_info.grph.ARRAY_MODE = ARRAY_1D_TILED_THIN1;
+	}
+
+	pl_config->config.tiling_info.grph.PIPE_CONFIG =
+			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
+
+	/* pitch is stored in pixels, not bytes */
+	pl_config->config.plane_size.grph.surface_size.x = 0;
+	pl_config->config.plane_size.grph.surface_size.y = 0;
+	pl_config->config.plane_size.grph.surface_size.width = amdgpu_fb->base.width;
+	pl_config->config.plane_size.grph.surface_size.height = amdgpu_fb->base.height;
+	pl_config->config.plane_size.grph.surface_pitch =
+		amdgpu_fb->base.pitches[0] / (amdgpu_fb->base.bits_per_pixel / 8);
+
+	/* TODO ACHTUNG ACHTUNG - NICHT SCHIESSEN
+	 * Correctly program src, dst, and clip */
+	pl_config->attributes.dst_rect.width = crtc->mode.hdisplay;
+	pl_config->attributes.dst_rect.height = crtc->mode.vdisplay;
+	pl_config->attributes.dst_rect.x = 0;
+	pl_config->attributes.dst_rect.y = 0;
+	pl_config->attributes.clip_rect = pl_config->attributes.dst_rect;
+	pl_config->attributes.src_rect = pl_config->attributes.dst_rect;
+
+	pl_config->mp_scaling_data.viewport.x = crtc->x;
+	pl_config->mp_scaling_data.viewport.y = crtc->y;
+	pl_config->mp_scaling_data.viewport.width = crtc->mode.hdisplay;
+	pl_config->mp_scaling_data.viewport.height = crtc->mode.vdisplay;
+
+	pl_config->config.rotation = ROTATION_ANGLE_0;
+	pl_config->config.layer_index = LAYER_INDEX_PRIMARY;
+	pl_config->config.enabled = 1;
+	pl_config->mask.bits.SURFACE_CONFIG_IS_VALID = 1;
+
+}
+
+/* this function will be called in the future from other files as well */
+void amdgpu_dm_fill_surface_address(struct drm_crtc *crtc,
+ struct plane_addr_flip_info *info,
+ struct amdgpu_framebuffer *afb,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *rbo;
+ uint64_t fb_location;
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ int r;
+
+ info->address_info.address.type = PLN_ADDR_TYPE_GRAPHICS;
+ //DD-ToDo
+ // info->flip_immediate = amdgpu_pflip_vsync ? false : true;
+
+ info->address_info.layer_index = LAYER_INDEX_PRIMARY;
+ info->address_info.flags.bits.ENABLE = 1;
+
+ /*Get fb location*/
+ /* no fb bound */
+ if (!crtc->primary->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return ;
+ }
+
+ DRM_DEBUG_KMS("Pin new framebuffer: %p\n", afb);
+ obj = afb->obj;
+ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return;
+
+ r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+
+ amdgpu_bo_unreserve(rbo);
+
+ if (unlikely(r != 0)) {
+ DRM_ERROR("Failed to pin framebuffer\n");
+ return ;
+ }
+
+ info->address_info.address.grph.addr.low_part =
+ lower_32_bits(fb_location);
+ info->address_info.address.grph.addr.high_part =
+ upper_32_bits(fb_location);
+
+ dal_update_plane_addresses(adev->dm.dal, 1, info);
+
+ /* unpin the old FB if surface change*/
+ if (old_fb && old_fb != crtc->primary->fb) {
+ struct amdgpu_framebuffer *afb;
+ /*struct amdgpu_bo *rbo;
+ int r;*/
+
+ afb = to_amdgpu_framebuffer(old_fb);
+ DRM_DEBUG_KMS("Unpin old framebuffer: %p\n", afb);
+ rbo = gem_to_amdgpu_bo(afb->obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ return;
+ } else {
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ }
+ }
+}
+
+
+/**
+ * amdgpu_dm_mode_set - set a mode
+ * @crtc: CRTC to program
+ * @mode: mode to use
+ * @x: x offset of the framebuffer scanout position
+ * @y: y offset of the framebuffer scanout position
+ * @old_fb: previous framebuffer, unpinned if the surface changed
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
+ * to fixup or reject the mode prior to trying to set it.
+ *
+ * RETURNS:
+ * True if the mode was set successfully, or false otherwise.
+ */
+bool amdgpu_dm_mode_set(
+	struct drm_crtc *crtc,
+	struct drm_display_mode *mode,
+	int x,
+	int y,
+	struct drm_framebuffer *old_fb)
+{
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	struct amdgpu_display_manager *dm =
+		&((struct amdgpu_device *)crtc->dev->dev_private)->dm;
+	struct drm_device *dev = crtc->dev;
+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+	int saved_x, saved_y;
+	bool ret = true;
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct plane_config pl_config = { { { 0 } } };
+	struct plane_addr_flip_info addr_flip_info = { 0 };
+	struct amdgpu_framebuffer *afb = NULL;
+	int num_planes = 1;
+	struct mode_query *mq;
+	const struct path_mode_set *pms;
+
+	DRM_DEBUG_KMS("amdgpu_dm_mode_set called\n");
+
+	if (!adev->dm.dal)
+		return false;
+
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+	if (!adjusted_mode)
+		return false;
+
+	/* for now, no support for atomic mode_set, thus old_fb is not used */
+	afb = to_amdgpu_framebuffer(crtc->primary->fb);
+
+	saved_hwmode = crtc->hwmode;
+	saved_mode = crtc->mode;
+	saved_x = crtc->x;
+	saved_y = crtc->y;
+
+	/* Update crtc values up front so the driver can rely on them for mode
+	 * setting.
+	 */
+	crtc->mode = *mode;
+	crtc->x = x;
+	crtc->y = y;
+
+	DRM_DEBUG_KMS("[CRTC: %d, DISPLAY_IDX: %d]\n",
+			crtc->base.id, acrtc->crtc_id);
+
+	/* Currently no support for atomic mode set */
+	{
+		struct render_mode rm;
+		struct refresh_rate rf = { 0 };
+		struct topology tp;
+
+		init_dal_topology(dm, &tp, acrtc->crtc_id);
+		rm.view.width = mode->hdisplay;
+		rm.view.height = mode->vdisplay;
+		rm.pixel_format =
+			convert_to_dal_pixel_format(crtc->primary->fb->pixel_format);
+		rf.field_rate = drm_mode_vrefresh(mode);
+		rf.VIDEO_OPTIMIZED_RATE = 0;
+		rf.INTERLACED = (mode->flags & DRM_MODE_FLAG_INTERLACE) != 0;
+
+		mq = dal_get_mode_query(
+			adev->dm.dal,
+			&tp,
+			dm->mode_query_option);
+		if (!mq)
+			goto fail;
+
+		if (!dal_mode_query_select_render_mode(mq, &rm)) {
+			DRM_ERROR("dal_mode_query_select_render_mode failed\n");
+			goto fail_query;
+		}
+
+		if (!dal_mode_query_select_refresh_rate(mq, &rf)) {
+			DRM_ERROR("dal_mode_query_select_refresh_rate failed\n");
+			goto fail_query;
+		}
+
+		pms = dal_mode_query_get_current_path_mode_set(mq);
+		if (!pms) {
+			DRM_ERROR("dal_mode_query_get_current_path_mode_set failed\n");
+			goto fail_query;
+		}
+	}
+
+	dal_set_blanking(adev->dm.dal, acrtc->crtc_id, true);
+	/* the actual mode set call */
+	ret = dal_set_path_mode(adev->dm.dal, pms);
+
+	dal_mode_query_destroy(&mq);
+
+	/* Surface programming */
+	pl_config.display_index = acrtc->crtc_id;
+	addr_flip_info.display_index = acrtc->crtc_id;
+
+	fill_plane_attributes(adev, &pl_config, crtc);
+
+	dal_setup_plane_configurations(adev->dm.dal, num_planes, &pl_config);
+
+	/*programs the surface addr and flip control*/
+	amdgpu_dm_fill_surface_address(crtc, &addr_flip_info, afb, old_fb);
+
+	dal_set_blanking(adev->dm.dal, acrtc->crtc_id, false);
+	/* Turn vblank on after reset */
+	drm_crtc_vblank_on(crtc);
+
+	if (ret) {
+		/* Store real post-adjustment hardware mode. */
+		crtc->hwmode = *adjusted_mode;
+		crtc->enabled = true;
+
+		/* Calculate and store various constants which
+		 * are later needed by vblank and swap-completion
+		 * timestamping. They are derived from true hwmode.
+		 */
+		drm_calc_timestamping_constants(crtc, &crtc->hwmode);
+	}
+
+	drm_mode_destroy(dev, adjusted_mode);
+	if (!ret) {
+		crtc->hwmode = saved_hwmode;
+		crtc->mode = saved_mode;
+		crtc->x = saved_x;
+		crtc->y = saved_y;
+	}
+
+	/* report the real outcome instead of unconditionally returning
+	 * true (previous code also returned -1 on one error path, which
+	 * a bool caller would have read as success) */
+	return ret;
+
+fail_query:
+	dal_mode_query_destroy(&mq);
+fail:
+	/* Roll back the speculatively-updated CRTC state and release the
+	 * duplicated mode, which earlier error paths leaked. */
+	drm_mode_destroy(dev, adjusted_mode);
+	crtc->hwmode = saved_hwmode;
+	crtc->mode = saved_mode;
+	crtc->x = saved_x;
+	crtc->y = saved_y;
+	return false;
+}
+
+/* Disable the mode on a CRTC.  If a display is still connected (and no
+ * fake display is in use) the actual path reset is skipped in favor of
+ * blanking only, to keep glitchless mode changes possible; otherwise
+ * the DAL path mode is reset.  The current framebuffer is unpinned in
+ * either case.  Returns true on success. */
+bool amdgpu_dm_mode_reset(struct drm_crtc *crtc)
+{
+	bool ret;
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	uint32_t display_index = acrtc->crtc_id;
+
+	/* Turn vblank off before reset */
+	drm_crtc_vblank_off(crtc);
+
+	/* When we will get called from drm asking to reset mode
+	 * when fb is null, it will lead us reset mode unnecessarily.
+	 * So change the sequence, we won't do the actual reset mode call
+	 * when display is connected, as that's harmful for glitchless mode
+	 * change (when we only reprogram pipe front end). */
+	if ((dal_get_connected_targets_vector(adev->dm.dal)
+		& (1 << display_index)) &&
+		adev->dm.fake_display_index == INVALID_DISPLAY_INDEX) {
+
+		/*
+		 * Blank the display, as buffer will be invalidated.
+		 * For the else case it would be done as part of dal reset mode
+		 * sequence.
+		 */
+		mutex_lock(&adev->dm.dal_mutex);
+		dal_set_blanking(adev->dm.dal, acrtc->crtc_id, true);
+		mutex_unlock(&adev->dm.dal_mutex);
+
+		ret = true;
+		DRM_DEBUG_KMS(
+			"Skip reset mode for disp_index %d\n",
+			display_index);
+	} else {
+		mutex_lock(&adev->dm.dal_mutex);
+		ret = dal_reset_path_mode(adev->dm.dal, 1, &display_index);
+		mutex_unlock(&adev->dm.dal_mutex);
+		DRM_DEBUG_KMS(
+			"Do reset mode for disp_index %d\n",
+			display_index);
+	}
+
+	/* unpin the FB */
+	if (crtc->primary->fb) {
+		struct amdgpu_framebuffer *afb;
+		struct amdgpu_bo *rbo;
+		int r;
+
+		afb = to_amdgpu_framebuffer(crtc->primary->fb);
+		DRM_DEBUG_KMS("Unpin old framebuffer: %p\n", afb);
+		rbo = gem_to_amdgpu_bo(afb->obj);
+		r = amdgpu_bo_reserve(rbo, false);
+		if (unlikely(r))
+			DRM_ERROR("failed to reserve rbo before unpin\n");
+		else {
+			amdgpu_bo_unpin(rbo);
+			amdgpu_bo_unreserve(rbo);
+		}
+	}
+
+	if (ret)
+		crtc->enabled = false;
+
+	return ret;
+}
+
+/**
+ * amdgpu_dm_set_config - set a new config from userspace
+ * @crtc: CRTC to setup
+ * @crtc_info: user provided configuration
+ * @new_mode: new mode to set
+ * @connector_set: set of connectors for the new config
+ * @fb: new framebuffer
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Setup a new configuration, provided by the user in @crtc_info, and enable
+ * it.
+ *
+ * RETURNS:
+ * Zero on success
+ */
+int amdgpu_dm_set_config(struct drm_mode_set *set)
+{
+	/* TODO:
+	 * Save + restore mode + fb info
+	 * Call dm_set_mode to do the following:
+	 *	Fill Modes
+	 *	Set Mode
+	 *	SetPlaneConfig
+	 *	UpdatePlaneAddress
+	 *	FillPlaneAttributes
+	 */
+
+	struct drm_device *dev;
+	struct amdgpu_device *adev;
+	struct drm_crtc *save_crtcs, *crtc;
+	struct drm_encoder *save_encoders, *encoder, *new_encoder;
+	bool mode_changed = false; /* if true do a full mode set */
+	bool fb_changed = false; /* if true and !mode_changed just do a flip */
+	/* if true and mode_changed do reset_mode */
+	struct drm_connector *save_connectors, *connector;
+	const struct drm_connector_helper_funcs *connector_funcs;
+	struct amdgpu_crtc *acrtc = NULL;
+	int count = 0, fail = 0;
+	struct drm_mode_set save_set;
+	int ret = 0;
+	int i;
+
+	DRM_DEBUG_KMS("\n");
+	DRM_DEBUG_KMS("--- DM set_config called ---\n");
+
+	BUG_ON(!set);
+	BUG_ON(!set->crtc);
+	BUG_ON(!set->crtc->helper_private);
+
+	/* Enforce sane interface api - has been abused by the fb helper. */
+	BUG_ON(!set->mode && set->fb);
+	BUG_ON(set->fb && set->num_connectors == 0);
+
+	if (set->num_connectors > 1) {
+		DRM_ERROR("Trying to set %zu connectors, but code only assumes max of one\n",
+				set->num_connectors);
+		return -EINVAL;
+	}
+
+	dev = set->crtc->dev;
+	adev = dev->dev_private;
+
+	if (!set->mode)
+		set->fb = NULL;
+
+	/* Allocate space for the backup of all (non-pointer) crtc, encoder
+	 * and connector data. kcalloc checks the count * size multiplication
+	 * for overflow, unlike an open-coded kzalloc(n * size). */
+	save_crtcs = kcalloc(dev->mode_config.num_crtc,
+			sizeof(struct drm_crtc), GFP_KERNEL);
+	if (!save_crtcs)
+		return -ENOMEM;
+
+	save_encoders = kcalloc(dev->mode_config.num_encoder,
+			sizeof(struct drm_encoder), GFP_KERNEL);
+	if (!save_encoders) {
+		kfree(save_crtcs);
+		return -ENOMEM;
+	}
+
+	save_connectors = kcalloc(dev->mode_config.num_connector,
+			sizeof(struct drm_connector), GFP_KERNEL);
+	if (!save_connectors) {
+		kfree(save_encoders);
+		kfree(save_crtcs);
+		return -ENOMEM;
+	}
+
+	/* Copy data. Note that driver private data is not affected.
+	 * Should anything bad happen only the expected state is
+	 * restored, not the drivers personal bookkeeping.
+	 */
+	count = 0;
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		save_crtcs[count++] = *crtc;
+	}
+
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		save_encoders[count++] = *encoder;
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		save_connectors[count++] = *connector;
+	}
+
+
+	if (set->fb) {
+		DRM_DEBUG_KMS(
+			"[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+			set->crtc->base.id, set->fb->base.id,
+			(int)set->num_connectors, set->x, set->y);
+	} else {
+		/*TODO: Move mode reset to crtc->disable instead, and
+		call drm_helper_disable_unused_functions here?*/
+
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+
+		DRM_DEBUG_KMS("Setting connector DPMS state to off\n");
+		for (i = 0; i < set->num_connectors; i++) {
+			DRM_DEBUG_KMS(
+				"\t[CONNECTOR:%d] set DPMS off\n",
+				set->connectors[i]->base.id);
+
+			set->connectors[i]->funcs->dpms(
+				set->connectors[i], DRM_MODE_DPMS_OFF);
+
+		}
+
+		if (!amdgpu_dm_mode_reset(set->crtc)) {
+			DRM_ERROR("### Failed to reset mode on [CRTC:%d] ###\n",
+				set->crtc->base.id);
+			ret = -EINVAL;
+			goto fail;
+		}
+		DRM_DEBUG_KMS("=== Early exit dm_set_config ===\n");
+		/* Bugfix: the backup buffers used to be leaked on this
+		 * early-return path. */
+		kfree(save_connectors);
+		kfree(save_encoders);
+		kfree(save_crtcs);
+		return 0;
+	}
+
+	save_set.crtc = set->crtc;
+	save_set.mode = &set->crtc->mode;
+	save_set.x = set->crtc->x;
+	save_set.y = set->crtc->y;
+	save_set.fb = set->crtc->primary->fb;
+
+	/* We should be able to check here if the fb has the same properties
+	 * and then just flip_or_move it */
+	if (set->crtc->primary->fb != set->fb) {
+		DRM_DEBUG_KMS("Old FB: %p, New FB[%d]: %p",
+				set->crtc->primary->fb,
+				set->fb->base.id,
+				set->fb);
+		/* If we have no fb then treat it as a full mode set */
+		if (set->crtc->primary->fb == NULL) {
+			DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
+			mode_changed = true;
+		} else if (set->fb == NULL) {
+			mode_changed = true;
+		} else if (set->fb->depth != set->crtc->primary->fb->depth) {
+			mode_changed = true;
+		} else if (set->fb->bits_per_pixel !=
+			   set->crtc->primary->fb->bits_per_pixel) {
+			mode_changed = true;
+		} else {
+			fb_changed = true;
+			DRM_DEBUG_KMS("fbs do not match, set fb_changed to true\n");
+		}
+	} else {
+		DRM_DEBUG_KMS("FB hasn't changed since last set\n");
+	}
+
+	if (set->x != set->crtc->x || set->y != set->crtc->y) {
+		fb_changed = true;
+		/* Bugfix: Original/New arguments were swapped; the current
+		 * crtc position is the original, the requested one is new. */
+		DRM_DEBUG_KMS("Viewport Changed. Original: (%d,%d), New: (%d,%d)\n",
+				set->crtc->x,
+				set->crtc->y,
+				set->x,
+				set->y);
+	}
+
+	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+		DRM_DEBUG_KMS("modes are different, set mode_changed=true\n");
+		drm_mode_debug_printmodeline(&set->crtc->mode);
+		drm_mode_debug_printmodeline(set->mode);
+		mode_changed = true;
+	}
+
+	/* traverse and find the appropriate connector,
+	use its encoder and crtc */
+	count = 0;
+	fail = 1;
+	acrtc = to_amdgpu_crtc(set->crtc);
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		/* matching the display index */
+		if (to_amdgpu_connector(connector)->connector_id ==
+						acrtc->crtc_id) {
+			fail = 0;
+			break;
+		}
+	}
+
+	if (fail) {
+		ret = -EINVAL;
+		DRM_ERROR("Couldn't find a matching connector\n");
+		goto fail;
+	}
+
+	/* Get best encoder for connector found above
+	 * TODO: Might need to traverse entire connector list at some point
+	 */
+	connector_funcs = connector->helper_private;
+	new_encoder = connector->encoder;
+
+	/* NOTE: We're assuming a max of one connector per set, so no need to
+	 * loop through connectors in set to find the correct one */
+	if (set->connectors[0] == connector) {
+		new_encoder = connector_funcs->best_encoder(connector);
+		/* if we can't get an encoder for a connector
+		we are setting now - then fail */
+		if (new_encoder == NULL)
+			/* don't break so fail path works correct */
+			fail = 1;
+		if (connector->dpms != DRM_MODE_DPMS_ON) {
+			DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+			mode_changed = true;
+		}
+	}
+
+	if (new_encoder != connector->encoder) {
+		DRM_DEBUG_KMS("encoder changed, full mode switch\n");
+		mode_changed = true;
+		/* If the encoder is reused for another connector, then
+		 * the appropriate crtc will be set later.
+		 */
+		if (connector->encoder)
+			connector->encoder->crtc = NULL;
+		connector->encoder = new_encoder;
+	}
+
+
+	if (fail) {
+		ret = -EINVAL;
+		DRM_ERROR("Couldn't find an encoder\n");
+		goto fail;
+	}
+
+	if (connector->encoder->crtc != set->crtc) {
+		mode_changed = true;
+		connector->encoder->crtc = set->crtc;
+
+		DRM_DEBUG_KMS("New CRTC being used. Full mode set: [CONNECTOR:%d] to [CRTC:%d]\n",
+				connector->base.id,
+				set->crtc->base.id);
+	} else {
+		DRM_DEBUG_KMS("Crtc didn't change: [CONNECTOR:%d] on [CRTC:%d]\n",
+				connector->base.id,
+				set->crtc->base.id);
+	}
+
+	if (mode_changed) {
+		/* Note: the outer 'adev' already points at
+		 * set->crtc->dev->dev_private; the old shadowing local
+		 * declaration was removed. */
+		DRM_DEBUG_KMS("Attempting to set mode from userspace. Mode:\n");
+
+		drm_mode_debug_printmodeline(set->mode);
+
+		set->crtc->primary->fb = set->fb;
+
+		mutex_lock(&adev->dm.dal_mutex);
+		if (!amdgpu_dm_mode_set(
+				set->crtc,
+				set->mode,
+				set->x,
+				set->y,
+				save_set.fb)) {
+			DRM_ERROR(
+				"failed to set mode on [CRTC:%d, DISPLAY_IDX: %d]\n",
+				set->crtc->base.id,
+				acrtc->crtc_id);
+			set->crtc->primary->fb = save_set.fb;
+			ret = -EINVAL;
+			mutex_unlock(&adev->dm.dal_mutex);
+			goto fail;
+		}
+
+		mutex_unlock(&adev->dm.dal_mutex);
+
+		DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+		for (i = 0; i < set->num_connectors; i++) {
+			DRM_DEBUG_KMS(
+				"\t[CONNECTOR:%d] set DPMS on\n",
+				set->connectors[i]->base.id);
+
+			set->connectors[i]->funcs->dpms(
+				set->connectors[i], DRM_MODE_DPMS_ON);
+
+		}
+
+		/* Re-set the cursor attributes after a successful set mode */
+		dm_crtc_cursor_reset(set->crtc);
+
+	} else if (fb_changed) { /* no mode change just surface change. */
+		struct plane_addr_flip_info addr_flip_info = { 0 };
+		struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(set->fb);
+		struct plane_config pl_config = { { { 0 } } };
+
+		DRM_DEBUG_KMS("FB Changed, update address\n");
+		set->crtc->primary->fb = set->fb;
+		set->crtc->x = set->x;
+		set->crtc->y = set->y;
+
+		/* program plane config */
+		pl_config.display_index = acrtc->crtc_id;
+
+		/* Blank display before programming surface */
+		dal_set_blanking(adev->dm.dal, acrtc->crtc_id, true);
+		fill_plane_attributes(adev, &pl_config, set->crtc);
+		dal_setup_plane_configurations(adev->dm.dal, 1, &pl_config);
+		/* Program the surface addr and flip control */
+		addr_flip_info.display_index = acrtc->crtc_id;
+		amdgpu_dm_fill_surface_address(set->crtc, &addr_flip_info,
+						afb, save_set.fb);
+		dal_set_blanking(adev->dm.dal, acrtc->crtc_id, false);
+	}
+	DRM_DEBUG_KMS("=== Finished dm_set_config ===\n");
+
+	kfree(save_connectors);
+	kfree(save_encoders);
+	kfree(save_crtcs);
+
+	/* adjust pm to dpms */
+	amdgpu_pm_compute_clocks(adev);
+
+	return 0;
+
+fail:
+	/* Restore all previous data. */
+	DRM_ERROR("### Failed set_config. Attempting to restore previous data ###\n");
+	count = 0;
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		*crtc = save_crtcs[count++];
+	}
+
+	count = 0;
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		*encoder = save_encoders[count++];
+	}
+
+	count = 0;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		*connector = save_connectors[count++];
+	}
+
+	/* Try to restore the config. Take dal_mutex for consistency with the
+	 * set path above (all gotos to this label occur with it unlocked). */
+	if (mode_changed && save_set.crtc->primary->fb) {
+		mutex_lock(&adev->dm.dal_mutex);
+		if (!amdgpu_dm_mode_set(
+				save_set.crtc,
+				save_set.mode,
+				save_set.x,
+				save_set.y,
+				save_set.fb))
+			DRM_ERROR("failed to restore config after modeset failure\n");
+		mutex_unlock(&adev->dm.dal_mutex);
+	}
+
+	kfree(save_connectors);
+	kfree(save_encoders);
+	kfree(save_crtcs);
+
+	DRM_DEBUG_KMS("=== Finished dm_set_config ===\n");
+	return ret;
+}
+
+/* Release a DM CRTC: unregister it from DRM, tear down its per-CRTC
+ * pageflip workqueue, then free the containing amdgpu_crtc. */
+void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	destroy_workqueue(acrtc->pflip_queue);
+	kfree(crtc);
+}
+
+/* drm_crtc_funcs.gamma_set: copy the legacy 256-entry LUT from DRM into a
+ * DAL raw_gamma_ramp and hand it to DAL for this display. Entries outside
+ * [start, end) are left zeroed. */
+static void amdgpu_dm_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+				     u16 *blue, uint32_t start, uint32_t size)
+{
+	struct amdgpu_device *adev = crtc->dev->dev_private;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+	uint32_t display_index = acrtc->crtc_id;
+	int end = (start + size > 256) ? 256 : start + size;
+	int i;
+	struct raw_gamma_ramp *gamma;
+
+	gamma = kzalloc(sizeof(struct raw_gamma_ramp), GFP_KERNEL);
+	/* Bugfix: the allocation result used to be dereferenced without a
+	 * NULL check, oopsing on allocation failure. */
+	if (!gamma)
+		return;
+
+	for (i = start; i < end; i++) {
+		gamma->rgb_256[i].red = red[i];
+		gamma->rgb_256[i].green = green[i];
+		gamma->rgb_256[i].blue = blue[i];
+	}
+
+	gamma->size = sizeof(gamma->rgb_256);
+	gamma->type = GAMMA_RAMP_TYPE_RGB256;
+
+	dal_set_gamma(adev->dm.dal, display_index, gamma);
+	kfree(gamma);
+}
+
+/* CRTC callbacks exposed to the DRM core. Only the operations currently
+ * available in the driver are implemented; the rest stay NULL. */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+/* .save = NULL,
+	.restore = NULL,
+	.reset = NULL,*/
+	.cursor_set = dm_crtc_cursor_set,
+	.cursor_move = dm_crtc_cursor_move,
+	.destroy = amdgpu_dm_crtc_destroy,
+	.gamma_set = amdgpu_dm_crtc_gamma_set,
+	.set_config = amdgpu_dm_set_config,
+	.page_flip = amdgpu_crtc_page_flip /* this function is common for
+				all implementations of DCE code (original and
+				DAL) */
+
+	/*.set_property = NULL*/
+};
+
+/* Populate a drm_display_mode from a DAL mode timing, render mode (view
+ * size) and refresh rate, then derive its canonical name. */
+static inline void fill_drm_mode_info(
+	struct drm_display_mode *drm_mode,
+	const struct mode_timing *mode_timing,
+	const struct render_mode *rm,
+	const struct refresh_rate *rr)
+{
+	int h_active = mode_timing->mode_info.pixel_width;
+	int v_active = mode_timing->mode_info.pixel_height;
+
+	/* Horizontal timing: sync begins after the front porch and lasts
+	 * h_sync_width pixels; totals come straight from the CRTC timing. */
+	drm_mode->hdisplay = rm->view.width;
+	drm_mode->hsync_start = h_active +
+		mode_timing->crtc_timing.h_front_porch;
+	drm_mode->hsync_end = drm_mode->hsync_start +
+		mode_timing->crtc_timing.h_sync_width;
+	drm_mode->htotal = mode_timing->crtc_timing.h_total;
+
+	/* Vertical timing, mirroring the horizontal layout above. */
+	drm_mode->vdisplay = rm->view.height;
+	drm_mode->vsync_start = v_active +
+		mode_timing->crtc_timing.v_front_porch;
+	drm_mode->vsync_end = drm_mode->vsync_start +
+		mode_timing->crtc_timing.v_sync_width;
+	drm_mode->vtotal = mode_timing->crtc_timing.v_total;
+
+	drm_mode->clock = mode_timing->crtc_timing.pix_clk_khz;
+	drm_mode->vrefresh = rr->field_rate;
+
+	if (mode_timing->crtc_timing.flags.HSYNC_POSITIVE_POLARITY)
+		drm_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	if (mode_timing->crtc_timing.flags.VSYNC_POSITIVE_POLARITY)
+		drm_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	if (mode_timing->crtc_timing.flags.INTERLACE)
+		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	if (mode_timing->mode_info.flags.PREFERRED)
+		drm_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+	drm_mode_set_name(drm_mode);
+}
+
+/**
+ * dm_add_mode - add a mode to the connector's mode list
+ * @connector: drm connector to receive the mode
+ * @mt: DAL mode timing describing the mode
+ * @rm: render mode (view size)
+ * @rr: refresh rate for the mode
+ *
+ * Allocates a drm_display_mode filled from @mt/@rm/@rr and prepends it to
+ * the connector's mode list.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int dm_add_mode(
+	struct drm_connector *connector,
+	const struct mode_timing *mt,
+	const struct render_mode *rm,
+	const struct refresh_rate *rr)
+{
+	struct drm_display_mode *drm_mode;
+
+	/* Return proper errno values instead of a bare -1; callers only
+	 * test for == 0, so this stays compatible. */
+	if (!mt)
+		return -EINVAL;
+
+	drm_mode = drm_mode_create(connector->dev);
+
+	if (!drm_mode)
+		return -ENOMEM;
+
+	fill_drm_mode_info(drm_mode, mt, rm, rr);
+
+	list_add(&drm_mode->head, &connector->modes);
+
+	return 0;
+}
+
+/* Callback adapter passed to dal_pin_active_path_modes(): 'what' is the
+ * opaque mode_query pointer, forwarded to dal_mode_query_pin_path_mode. */
+static void add_to_mq_helper(void *what, const struct path_mode *pm)
+{
+	dal_mode_query_pin_path_mode(what, pm);
+}
+
+/* drm_connector_funcs.detect: query DAL's connected-targets bitmask and
+ * report whether this connector's display index is present in it. */
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct amdgpu_connector *aconnector =
+			to_amdgpu_connector(connector);
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	bool connected;
+	uint32_t display_index = aconnector->connector_id;
+
+	/* Bugfix: 0 is not a valid drm_connector_status value; report
+	 * disconnected when there is no DAL instance to ask. */
+	if (!adev->dm.dal)
+		return connector_status_disconnected;
+
+	connected = (dal_get_connected_targets_vector(adev->dm.dal)
+						& (1 << display_index));
+
+	return (connected ? connector_status_connected :
+			connector_status_disconnected);
+}
+/* drm_crtc_helper_funcs.disable stub — logs only, no hardware action yet. */
+static void amdgpu_dm_crtc_disable(struct drm_crtc *crtc)
+{
+	DRM_DEBUG_KMS("NOT IMPLEMENTED\n");
+}
+
+/* drm_encoder_helper_funcs.disable stub — logs only, no hardware action yet. */
+static void amdgpu_dm_encoder_disable(struct drm_encoder *encoder)
+{
+	DRM_DEBUG_KMS("NOT IMPLEMENTED\n");
+}
+
+/**
+ * amdgpu_display_manager_fill_modes - get complete set of
+ * display timing modes per drm_connector
+ *
+ * @connector: DRM device connector
+ * @maxX: max width for modes
+ * @maxY: max height for modes
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Query connector and try to detect modes on it. Received
+ * modes are assumed to be filtered and validated and supported
+ * by the connector assuming set_mode for that connector will
+ * come immediately after this function call.
+ *
+ * Therefore all these modes will be put into the normal modes
+ * list.
+ *
+ * Intended to be used either at bootup time or when major configuration
+ * changes have occurred.
+ *
+ *
+ * RETURNS:
+ * Number of modes found on @connector.
+ */
+int amdgpu_display_manager_fill_modes(struct drm_connector *connector,
+				      uint32_t maxX, uint32_t maxY)
+{
+	struct amdgpu_connector *aconnector =
+			to_amdgpu_connector(connector);
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_display_mode *mode, *t;
+	unsigned int non_filtered_modes_num = 0;
+	struct mode_query *mq;
+	struct topology tp;
+
+	if (!adev->dm.dal)
+		return 0;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d, DISPLAY_IDX: %d]\n",
+		connector->connector_type_id, aconnector->connector_id);
+
+	/* clean all the previous modes on this connector */
+	list_for_each_entry_safe(mode, t, &connector->modes, head) {
+		list_del(&mode->head);
+		drm_mode_debug_printmodeline(mode);
+		DRM_DEBUG_KMS("Not using %s mode %d\n",
+			mode->name, mode->status);
+		drm_mode_destroy(dev, mode);
+	}
+
+
+	if (connector->status == connector_status_disconnected) {
+		DRM_DEBUG_KMS("[CONNECTOR:%d] disconnected\n",
+			connector->connector_type_id);
+		drm_mode_connector_update_edid_property(connector, NULL);
+		goto prune;
+	}
+
+	/* Get the mode list from DAL, iterate over it and add the modes to
+	 * the drm connector mode list.
+	 *
+	 * The critical assumption here is that the returned list is clean:
+	 * no duplicates, all the modes valid, ordered, and actually settable
+	 * on the hardware. */
+	init_dal_topology(&adev->dm, &tp, aconnector->connector_id);
+	mq = dal_get_mode_query(adev->dm.dal, &tp, adev->dm.mode_query_option);
+
+	if (!mq)
+		goto prune;
+
+	dal_pin_active_path_modes(
+		adev->dm.dal,
+		mq,
+		aconnector->connector_id,
+		add_to_mq_helper);
+
+	/* Bugfix: mq used to leak when select_first failed (goto prune
+	 * skipped dal_mode_query_destroy). */
+	if (!dal_mode_query_select_first(mq))
+		goto destroy_mq;
+
+	do {
+		const struct render_mode *rm =
+			dal_mode_query_get_current_render_mode(mq);
+
+		if (rm->pixel_format != PIXEL_FORMAT_ARGB8888)
+			continue;
+
+		do {
+			const struct refresh_rate *rr =
+				dal_mode_query_get_current_refresh_rate(mq);
+			const struct path_mode_set *pms =
+				dal_mode_query_get_current_path_mode_set(mq);
+			const struct path_mode *pm =
+				dal_pms_get_path_mode_for_display_index(
+					pms,
+					aconnector->connector_id);
+
+			const struct mode_timing *mt = pm->mode_timing;
+
+			if (mt->mode_info.pixel_height > maxY ||
+				mt->mode_info.pixel_width > maxX ||
+				mt->mode_info.flags.INTERLACE)
+				continue;
+
+			if (dm_add_mode(connector, mt, rm, rr) == 0)
+				++non_filtered_modes_num;
+
+			if (adev->dm.fake_display_index ==
+					aconnector->connector_id)
+				break;
+
+		} while (dal_mode_query_select_next_refresh_rate(mq));
+
+		if (adev->dm.fake_display_index == aconnector->connector_id)
+			break;
+
+	} while (dal_mode_query_select_next_render_mode(mq));
+
+destroy_mq:
+	dal_mode_query_destroy(&mq);
+
+prune:
+	DRM_DEBUG_KMS("[CONNECTOR:%d] probed modes :\n",
+		connector->connector_type_id);
+
+	list_for_each_entry(mode, &connector->modes, head) {
+		drm_mode_set_crtcinfo(mode, 0);
+		drm_mode_debug_printmodeline(mode);
+	}
+
+	return non_filtered_modes_num;
+}
+
+/* drm_connector_funcs.set_property stub: logs and reports success so
+ * userspace property writes do not error out. */
+static int amdgpu_dm_connector_set_property(struct drm_connector *connector,
+					struct drm_property *property,
+					uint64_t val)
+{
+	DRM_ERROR("NOT IMPLEMENTED\n");
+	return 0;
+}
+
+/* drm_connector_funcs.destroy: unregister the connector from DRM and free
+ * the containing amdgpu_connector. */
+void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+	/*drm_sysfs_connector_remove(connector);*/
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+/* drm_connector_funcs.force stub — logs only, forced state not handled yet. */
+static void amdgpu_dm_connector_force(struct drm_connector *connector)
+{
+	DRM_ERROR("NOT IMPLEMENTED\n");
+}
+
+/* Translate a DRM DPMS mode into the matching DAL power state. Unknown
+ * values fall through to OFF (with an error log) so a bad request can
+ * never leave the display in an undefined power state. */
+static inline enum dal_power_state to_dal_power_state(int mode)
+{
+	switch (mode) {
+	case DRM_MODE_DPMS_OFF:
+		return DAL_POWER_STATE_OFF;
+	case DRM_MODE_DPMS_STANDBY:
+		return DAL_POWER_STATE_STANDBY;
+	case DRM_MODE_DPMS_SUSPEND:
+		return DAL_POWER_STATE_SUSPEND;
+	case DRM_MODE_DPMS_ON:
+		return DAL_POWER_STATE_ON;
+	default:
+		DRM_ERROR("Invalid DPMS mode requested\n");
+		return DAL_POWER_STATE_OFF;
+	}
+}
+
+
+/* drm_connector_funcs.dpms: forward the requested DPMS state to DAL for
+ * this connector's display index, then let PM re-evaluate clocks. */
+static void amdgpu_dm_connector_dpms(struct drm_connector *connector, int mode)
+{
+	struct amdgpu_device *adev = connector->dev->dev_private;
+	struct amdgpu_connector *aconnector = to_amdgpu_connector(connector);
+	enum dal_power_state power_state = to_dal_power_state(mode);
+
+	/* Nothing to do if the requested state is already active. */
+	if (connector->dpms == mode)
+		return;
+
+	dal_set_display_dpms(adev->dm.dal, aconnector->connector_id,
+			power_state);
+
+	connector->dpms = mode;
+
+	/* adjust pm to dpms */
+	amdgpu_pm_compute_clocks(adev);
+}
+
+/* Connector callbacks exposed to the DRM core. */
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+	.dpms = amdgpu_dm_connector_dpms,
+/* .save = NULL,
+	.restore = NULL,
+	.reset = NULL,*/
+	.detect = amdgpu_dm_connector_detect,
+	.fill_modes = amdgpu_display_manager_fill_modes,
+	.set_property = amdgpu_dm_connector_set_property,
+	.destroy = amdgpu_dm_connector_destroy,
+	.force = amdgpu_dm_connector_force
+};
+
+/* drm_crtc_helper_funcs.dpms: only DPMS_ON enables vblank interrupts;
+ * every other state (OFF/SUSPEND/STANDBY/unknown) turns them off. */
+static void dm_crtc_helper_dpms(struct drm_crtc *crtc, int mode)
+{
+	if (mode == DRM_MODE_DPMS_ON)
+		drm_crtc_vblank_on(crtc);
+	else
+		drm_crtc_vblank_off(crtc);
+}
+
+/* CRTC helper callbacks used by the DRM helper library. */
+static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
+	.disable = amdgpu_dm_crtc_disable,
+	.dpms = dm_crtc_helper_dpms,
+	.load_lut = NULL
+};
+
+/* Encoder helper callbacks used by the DRM helper library. */
+static const struct drm_encoder_helper_funcs dm_encoder_helper_funcs = {
+	.disable = amdgpu_dm_encoder_disable,
+};
+
+
+/* Register an amdgpu_crtc with DRM for the given display index, set up its
+ * cursor limits, gamma table and per-CRTC pageflip workqueue.
+ *
+ * Returns 0 on success, negative errno on failure (acrtc->crtc_id is set
+ * to -1 on any failure path). */
+int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+			struct amdgpu_crtc *acrtc,
+			int display_idx)
+{
+	int res = drm_crtc_init(
+			dm->ddev,
+			&acrtc->base,
+			&amdgpu_dm_crtc_funcs);
+
+	if (res)
+		goto fail;
+
+	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
+
+	acrtc->max_cursor_width = 128;
+	acrtc->max_cursor_height = 128;
+
+	acrtc->crtc_id = display_idx;
+	acrtc->base.enabled = false;
+
+	dm->adev->mode_info.crtcs[display_idx] = acrtc;
+	drm_mode_crtc_set_gamma_size(&acrtc->base, 256);
+
+	acrtc->pflip_queue =
+		create_singlethread_workqueue("amdgpu-pageflip-queue");
+	/* Bugfix: create_singlethread_workqueue() returns NULL on failure;
+	 * this used to go unchecked, leaving a registered CRTC whose
+	 * destroy callback would pass NULL to destroy_workqueue(). */
+	if (!acrtc->pflip_queue) {
+		DRM_ERROR("Failed to create pageflip workqueue\n");
+		dm->adev->mode_info.crtcs[display_idx] = NULL;
+		drm_crtc_cleanup(&acrtc->base);
+		res = -ENOMEM;
+		goto fail;
+	}
+
+	return 0;
+fail:
+	acrtc->crtc_id = -1;
+	return res;
+}
+
+/* drm_connector_helper_funcs.best_encoder: resolve the connector's first
+ * (and only) encoder id to its drm_encoder, or NULL when unset/unfound. */
+static struct drm_encoder *best_encoder(struct drm_connector *connector)
+{
+	struct drm_mode_object *obj;
+	int enc_id = connector->encoder_ids[0];
+
+	DRM_DEBUG_KMS("Finding the best encoder\n");
+
+	if (!enc_id) {
+		DRM_ERROR("No encoder id\n");
+		return NULL;
+	}
+
+	obj = drm_mode_object_find(connector->dev, enc_id,
+			DRM_MODE_OBJECT_ENCODER);
+	if (!obj) {
+		DRM_ERROR("Couldn't find a matching encoder for our connector\n");
+		return NULL;
+	}
+
+	return obj_to_encoder(obj);
+}
+
+
+/* Connector helper callbacks used by the DRM helper library. */
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+	.best_encoder = best_encoder
+};
+
+/* Map a DAL signal type onto the DRM connector type enumeration; signals
+ * the driver does not recognize become DRM_MODE_CONNECTOR_Unknown. */
+static int to_drm_connector_type(enum signal_type st)
+{
+	switch (st) {
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+		return DRM_MODE_CONNECTOR_DVID;
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		return DRM_MODE_CONNECTOR_DisplayPort;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		return DRM_MODE_CONNECTOR_HDMIA;
+	case SIGNAL_TYPE_EDP:
+		return DRM_MODE_CONNECTOR_eDP;
+	case SIGNAL_TYPE_RGB:
+		return DRM_MODE_CONNECTOR_VGA;
+	default:
+		return DRM_MODE_CONNECTOR_Unknown;
+	}
+}
+
+/* Register an amdgpu_connector with DRM for the given display index,
+ * attach it to @aencoder, publish its EDID when connected, and expose it
+ * to userspace. Returns 0 on success, the drm_connector_init() error
+ * otherwise (connector_id is set to -1 on failure). */
+int amdgpu_dm_connector_init(
+	struct amdgpu_display_manager *dm,
+	struct amdgpu_connector *aconnector,
+	int display_idx,
+	bool is_connected,
+	struct amdgpu_encoder *aencoder)
+{
+	int res;
+	int connector_type;
+	enum signal_type signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+	DRM_DEBUG_KMS("amdgpu_dm_connector_init\n");
+
+	/* Ask DAL which signal this display carries; keep the HDMI default
+	 * when no DAL instance is available. */
+	if (dm->dal != NULL)
+		signal = dal_get_display_signal(dm->dal, display_idx);
+	connector_type = to_drm_connector_type(signal);
+
+	res = drm_connector_init(
+			dm->ddev,
+			&aconnector->base,
+			&amdgpu_dm_connector_funcs,
+			connector_type);
+
+	if (res) {
+		DRM_ERROR("connector_init failed\n");
+		aconnector->connector_id = -1;
+		return res;
+	}
+
+	drm_connector_helper_add(
+			&aconnector->base,
+			&amdgpu_dm_connector_helper_funcs);
+
+	aconnector->connector_id = display_idx;
+	aconnector->base.interlace_allowed = true;
+	aconnector->base.doublescan_allowed = true;
+	aconnector->hpd.hpd = display_idx; /* maps to 'enum amdgpu_hpd_id' */
+
+	aconnector->base.status = is_connected ?
+			connector_status_connected :
+			connector_status_disconnected;
+
+	/* Enable HPD polling on connector types that support hot plug;
+	 * 'polled' defaults to 0 (no hot-plug support) for everything else. */
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_HDMIA:
+	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_DVID:
+		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+		break;
+	default:
+		break;
+	}
+
+	drm_mode_connector_attach_encoder(
+		&aconnector->base, &aencoder->base);
+
+	/* TODO: this should be updated during hotplug/unplug */
+	if (dm->dal != NULL && is_connected) {
+		DRM_DEBUG_KMS("Connector is connected\n");
+		drm_mode_connector_update_edid_property(
+			&aconnector->base,
+			(struct edid *)
+			dal_get_display_edid(dm->dal, display_idx, NULL));
+	}
+
+	drm_connector_register(&aconnector->base);
+
+	return 0;
+}
+
+/* Register an amdgpu_encoder with DRM, bind it to the CRTC for the given
+ * display index, and hook up the DM encoder helpers.
+ *
+ * Returns the drm_encoder_init() result; on failure encoder_id is set to
+ * -1 and the encoder is left untouched. */
+int amdgpu_dm_encoder_init(
+	struct drm_device *dev,
+	struct amdgpu_encoder *aencoder,
+	int display_idx,
+	struct amdgpu_crtc *acrtc)
+{
+	int res = drm_encoder_init(dev,
+			&aencoder->base,
+			&amdgpu_dm_encoder_funcs,
+			DRM_MODE_ENCODER_TMDS);
+
+	/* Bugfix: the old code configured possible_crtcs/crtc and added the
+	 * helper funcs even when drm_encoder_init() had failed, operating on
+	 * an uninitialized encoder. Bail out early instead. */
+	if (res) {
+		aencoder->encoder_id = -1;
+		return res;
+	}
+
+	aencoder->base.possible_crtcs = 1 << display_idx;
+	aencoder->base.crtc = &acrtc->base;
+	aencoder->encoder_id = display_idx;
+
+	drm_encoder_helper_add(&aencoder->base, &dm_encoder_helper_funcs);
+
+	return res;
+}
diff --git a/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h
new file mode 100644
index 000000000000..721d83435f93
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/amdgpu_dm/amdgpu_dm_types.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2012-13 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __AMDGPU_DM_TYPES_H__
+#define __AMDGPU_DM_TYPES_H__
+
+#include <drm/drmP.h>
+
+struct plane_addr_flip_info;
+struct amdgpu_framebuffer;
+struct amdgpu_display_manager;
+
+/* Object initializers: register a CRTC/connector/encoder for the given DAL
+ * display index with the DRM core. All return 0 on success. */
+/*TODO Jodan Hersen use the one in amdgpu_dm*/
+int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+			struct amdgpu_crtc *amdgpu_crtc,
+			int display_idx);
+int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+			struct amdgpu_connector *amdgpu_connector,
+			int display_idx,
+			bool is_connected,
+			struct amdgpu_encoder *amdgpu_encoder);
+int amdgpu_dm_encoder_init(struct drm_device *dev,
+			struct amdgpu_encoder *amdgpu_encoder,
+			int display_idx,
+			struct amdgpu_crtc *amdgpu_crtc);
+
+
+/* Fill a DAL plane address/flip descriptor from the framebuffer attached
+ * to @crtc. */
+void amdgpu_dm_fill_surface_address(struct drm_crtc *crtc,
+			struct plane_addr_flip_info *info,
+			struct amdgpu_framebuffer *afb,
+			struct drm_framebuffer *old_fb);
+
+/* Destructors matching the initializers above. */
+void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc);
+void amdgpu_dm_connector_destroy(struct drm_connector *connector);
+void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder);
+
+/* Reset/set the DAL path mode for @crtc; both return true on success. */
+bool amdgpu_dm_mode_reset(struct drm_crtc *crtc);
+
+bool amdgpu_dm_mode_set(
+	struct drm_crtc *crtc,
+	struct drm_display_mode *mode,
+	int x,
+	int y,
+	struct drm_framebuffer *old_fb);
+
+#endif /* __AMDGPU_DM_TYPES_H__ */