Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 3
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 2
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 2
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 54
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 30
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 591
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.h | 26
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 11
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 4
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 8
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 10
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 15
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 49
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 18
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 1
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 20
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/omapdss.h | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c | 58
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 37
67 files changed, 952 insertions(+), 492 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 104b2e0d893b..b0fc116296cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -233,7 +233,7 @@ enum amdgpu_kiq_irq {
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
+#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 8816c697b205..387f1cf1dc20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
- ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (type == CGS_UCODE_ID_SMU) {
if (((adev->pdev->device == 0x67ef) &&
((adev->pdev->revision == 0xe0) ||
- (adev->pdev->revision == 0xe2) ||
(adev->pdev->revision == 0xe5))) ||
((adev->pdev->device == 0x67ff) &&
((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
- } else
+ } else if ((adev->pdev->device == 0x67ef) &&
+ (adev->pdev->revision == 0xe2)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ } else {
strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
@@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xe7) ||
(adev->pdev->revision == 0xef))) ||
((adev->pdev->device == 0x6fdf) &&
- (adev->pdev->revision == 0xef))) {
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
- } else
+ } else if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe1) ||
+ (adev->pdev->revision == 0xf7))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ } else {
strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+ (adev->pdev->revision == 0x10)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ } else {
+ strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ }
break;
case CHIP_VEGAM:
strcpy(fw_name, "amdgpu/vegam_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 663043c8f0f5..0acc8dee2cb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -124,14 +124,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
goto free_chunk;
}
- mutex_lock(&p->ctx->lock);
-
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
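The reordering above takes ctx->lock before the guilty-context check, presumably because the shared teardown path drops the lock unconditionally; bailing out on a guilty context before acquiring it would otherwise unbalance the mutex. The invariant, as a minimal sketch (teardown() is a hypothetical stand-in assumed to call mutex_unlock()):

    int setup(struct obj *o)
    {
            int ret = 0;

            mutex_lock(&o->lock);           /* acquire before any bail-out */
            if (o->guilty) {
                    ret = -ECANCELED;
                    goto out;               /* every exit now owns the lock */
            }
            /* ... work under the lock ... */
    out:
            teardown(o);    /* assumed to unlock exactly once, always balanced */
            return ret;
    }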
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f9b54236102d..95f4c4139fc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_UVD_ENC] = 1,
[AMDGPU_HW_IP_VCN_DEC] = 1,
[AMDGPU_HW_IP_VCN_ENC] = 1,
+ [AMDGPU_HW_IP_VCN_JPEG] = 1,
};
static int amdgput_ctx_total_num_entities(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8de55f7f1a3a..74b611e8a1b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -872,7 +872,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Vega 12 */
{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -885,6 +891,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
/* Raven */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 81732a84c2ab..8f3d44e5e787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
- /* Ensure IB tests are run on ring */
- flush_delayed_work(&adev->late_init_work);
-
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
struct amdgpu_fpriv *fpriv;
int r, pasid;
+ /* Ensure IB tests are run on ring */
+ flush_delayed_work(&adev->late_init_work);
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
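Moving the flush from the info ioctl to driver open means the wait for the late-init IB tests is paid once per file open rather than on every query. flush_delayed_work() advances a still-pending timer and then waits for the work item to finish, so the tests are guaranteed complete by the time open returns. In isolation:

    /* Elsewhere, late init is deferred: */
    schedule_delayed_work(&adev->late_init_work, msecs_to_jiffies(1000));

    /* In open: force the pending work to run now and wait for it. */
    flush_delayed_work(&adev->late_init_work);
    /* From here on, late_init_work has completed at least once. */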
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1d3265c97b70..747c068379dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
chip_name = "tonga";
break;
case CHIP_POLARIS11:
- chip_name = "polaris11";
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff))))
+ chip_name = "polaris11_k";
+ else if ((adev->pdev->device == 0x67ef) &&
+ (adev->pdev->revision == 0xe2))
+ chip_name = "polaris11_k";
+ else
+ chip_name = "polaris11";
break;
case CHIP_POLARIS10:
- chip_name = "polaris10";
+ if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe1) ||
+ (adev->pdev->revision == 0xf7)))
+ chip_name = "polaris10_k";
+ else
+ chip_name = "polaris10";
break;
case CHIP_POLARIS12:
- chip_name = "polaris12";
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+ (adev->pdev->revision == 0x10))))
+ chip_name = "polaris12_k";
+ else
+ chip_name = "polaris12";
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
@@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 data, vbios_version;
+ u32 data;
int i, ucode_size, regs_size;
/* Skip MC ucode loading on SR-IOV capable boards.
@@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
if (amdgpu_sriov_bios(adev))
return 0;
- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
- data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
- vbios_version = data & 0xf;
-
- if (vbios_version == 0)
- return 0;
-
if (!adev->gmc.fw)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index eae90922fdbe..322e09b5b448 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
+static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
/**
* vcn_v1_0_early_init - set function pointers
@@ -222,7 +223,7 @@ static int vcn_v1_0_hw_fini(void *handle)
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
- vcn_v1_0_stop(adev);
+ vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
ring->ready = false;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a9f18ea7e354..e4ded890b1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -337,12 +337,19 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6864, &vega10_device_info }, /* Vega10 */
{ 0x6867, &vega10_device_info }, /* Vega10 */
{ 0x6868, &vega10_device_info }, /* Vega10 */
+ { 0x6869, &vega10_device_info }, /* Vega10 */
+ { 0x686A, &vega10_device_info }, /* Vega10 */
+ { 0x686B, &vega10_device_info }, /* Vega10 */
{ 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
+ { 0x686D, &vega10_device_info }, /* Vega10 */
+ { 0x686E, &vega10_device_info }, /* Vega10 */
+ { 0x686F, &vega10_device_info }, /* Vega10 */
{ 0x687F, &vega10_device_info }, /* Vega10 */
{ 0x66a0, &vega20_device_info }, /* Vega20 */
{ 0x66a1, &vega20_device_info }, /* Vega20 */
{ 0x66a2, &vega20_device_info }, /* Vega20 */
{ 0x66a3, &vega20_device_info }, /* Vega20 */
+ { 0x66a4, &vega20_device_info }, /* Vega20 */
{ 0x66a7, &vega20_device_info }, /* Vega20 */
{ 0x66af, &vega20_device_info } /* Vega20 */
};
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ca925200fe09..5a6edf65c9ea 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2554,9 +2554,9 @@ static void fill_audio_info(struct audio_info *audio_info,
cea_revision = drm_connector->display_info.cea_rev;
- strncpy(audio_info->display_name,
+ strscpy(audio_info->display_name,
edid_caps->display_name,
- AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
if (cea_revision >= 3) {
audio_info->mode_count = edid_caps->audio_mode_count;
@@ -3042,6 +3042,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_enable = false;
state->underscan_hborder = 0;
state->underscan_vborder = 0;
+ state->max_bpc = 8;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -3063,6 +3064,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
new_state->freesync_capable = state->freesync_capable;
new_state->freesync_enable = state->freesync_enable;
+ new_state->max_bpc = state->max_bpc;
return &new_state->base;
}
@@ -3650,7 +3652,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
mode->hdisplay = hdisplay;
mode->vdisplay = vdisplay;
mode->type &= ~DRM_MODE_TYPE_PREFERRED;
- strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
return mode;
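Both conversions above trade strncpy() for strscpy(), which always NUL-terminates the destination and reports truncation, whereas strncpy() can leave the buffer unterminated (hence the old, easy-to-get-wrong SIZE - 1). A minimal sketch of the difference; the source pointer is illustrative:

    char name[DRM_DISPLAY_MODE_LEN];
    const char *src = edid_name;            /* hypothetical source string */
    ssize_t ret;

    ret = strscpy(name, src, sizeof(name)); /* always NUL-terminated */
    if (ret == -E2BIG)
            pr_debug("name truncated\n");   /* strncpy gives no such signal */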
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index b459867a05b2..a6bcb90e8419 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2512,6 +2512,8 @@ static void pplib_apply_display_requirements(
dc,
context->bw.dce.sclk_khz);
+ pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw.dce.sclk_deep_sleep_khz;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 85119c2bdcc8..a2a7e0e94aa6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
adev = hwmgr->adev;
- if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
+ /* Skip for suspend/resume case */
+ if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
+ && adev->in_suspend) {
pr_info("dpm has been enabled\n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 47ac92369739..0173d0480024 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = phm_pre_display_configuration_changed(hwmgr);
+ if (ret)
+ return ret;
ret = phm_set_cpu_power_state(hwmgr);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 91ffb7bc4ee7..56437866d120 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
if (skip)
return 0;
- phm_pre_display_configuration_changed(hwmgr);
-
phm_display_configuration_changed(hwmgr);
if (hwmgr->ps)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 88f6b35ea6fe..b61a01f55284 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3589,8 +3589,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= sclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- sclk_table->dpm_levels[i-1].value = sclk;
+ if (sclk > sclk_table->dpm_levels[i-1].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ sclk_table->dpm_levels[i-1].value = sclk;
+ }
} else {
/* TODO: Check SCLK in DAL's minimum clocks
* in case DeepSleep divider update is required.
@@ -3607,8 +3609,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= mclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- mclk_table->dpm_levels[i-1].value = mclk;
+ if (mclk > mclk_table->dpm_levels[i-1].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
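The guard added here (and repeated for vega10 below) fixes a subtle overwrite: when the requested clock exceeded every table entry, the old code unconditionally rewrote the top DPM level, so a later, lower request could silently drag it back down. The fix only ever raises the level. Abbreviated restatement with a worked example (values illustrative):

    /* request 1200 -> top level becomes 1200 (intended OD bump)
     * request 1100 -> old code: top level shrinks to 1100 (wrong)
     *                 new code: 1100 <= 1200, top level stays put
     */
    if (clk > table->dpm_levels[table->count - 1].value) {
            table->dpm_levels[table->count - 1].value = clk;
            data->need_update |= OD_UPDATE;         /* names abbreviated */
    }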
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index e2bc6e0c229f..79c86247d0ac 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3266,8 +3266,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= sclk_table->count) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- sclk_table->dpm_levels[i-1].value = sclk;
+ if (sclk > sclk_table->dpm_levels[i-1].value) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ sclk_table->dpm_levels[i-1].value = sclk;
+ }
}
for (i = 0; i < mclk_table->count; i++) {
@@ -3276,8 +3278,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= mclk_table->count) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- mclk_table->dpm_levels[i-1].value = mclk;
+ if (mclk > mclk_table->dpm_levels[i-1].value) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index b4eadd47f3a4..3b7fce5d7258 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -130,7 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
- data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
+ data->registry_data.fclk_gfxclk_ratio = 0;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
@@ -1660,14 +1660,15 @@ static uint32_t vega20_find_highest_dpm_level(
return i;
}
-static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t min_freq;
int ret = 0;
- if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1676,7 +1677,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1692,7 +1694,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ if (data->smu_features[GNLD_DPM_UVD].enabled &&
+ (feature_mask & FEATURE_DPM_UVD_MASK)) {
min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1710,7 +1713,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ if (data->smu_features[GNLD_DPM_VCE].enabled &&
+ (feature_mask & FEATURE_DPM_VCE_MASK)) {
min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1720,7 +1724,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1733,14 +1738,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret;
}
-static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t max_freq;
int ret = 0;
- if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1750,7 +1756,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1760,7 +1767,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ if (data->smu_features[GNLD_DPM_UVD].enabled &&
+ (feature_mask & FEATURE_DPM_UVD_MASK)) {
max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1777,7 +1785,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ if (data->smu_features[GNLD_DPM_VCE].enabled &&
+ (feature_mask & FEATURE_DPM_VCE_MASK)) {
max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1787,7 +1796,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -2126,12 +2136,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2158,12 +2168,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2176,12 +2186,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Max Levels!",
return ret);
@@ -2239,12 +2249,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2259,12 +2269,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
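The new feature_mask parameter lets callers pick which DPM domains a min/max upload reprograms. Passing 0xFFFFFFFF preserves the old touch-everything behaviour (forced highest/lowest and unforce paths), while force_clock_level now touches only the clock the user actually changed. Calling convention, as used in the hunks above:

    /* All domains: */
    ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);

    /* Memory clock only: */
    ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);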
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
index 62f36ba2435b..c1a99dfe4913 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
+
#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 872d3824337b..a1e0ac9ae248 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1985,6 +1985,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ /* Apply avfs cks-off voltages to avoid the overshoot
+ * when switching to the highest sclk frequency
+ */
+ if (data->apply_avfs_cks_off_voltage)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 99d5e4f98f49..a6edd5df33b0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 0cd827e11fa2..de26df0c6044 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
{
struct ast_framebuffer *afb = &afbdev->afb;
+ drm_crtc_force_disable_all(dev);
drm_fb_helper_unregister_fbi(&afbdev->helper);
if (afb->obj) {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 680566d97adc..10243965ee7c 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -54,7 +54,7 @@
#define SN_AUX_ADDR_7_0_REG 0x76
#define SN_AUX_LENGTH_REG 0x77
#define SN_AUX_CMD_REG 0x78
-#define AUX_CMD_SEND BIT(1)
+#define AUX_CMD_SEND BIT(0)
#define AUX_CMD_REQ(x) ((x) << 4)
#define SN_AUX_RDATA_REG(x) (0x79 + (x))
#define SN_SSC_CONFIG_REG 0x93
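The AUX_CMD_SEND fix is a one-bit register-map correction: per the patch, the send strobe lives in bit 0 of SN_AUX_CMD_REG, and BIT(1) was presumably setting a neighbouring bit instead, leaving AUX transactions un-triggered. For reference, BIT(n) expands to (1UL << (n)):

    #define AUX_CMD_SEND    BIT(0)  /* == 0x01, the actual send strobe */
                                    /* BIT(1) == 0x02: the wrong bit */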
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index dd852a25d375..9d64f874f965 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
static bool drm_leak_fbdev_smem = false;
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
-MODULE_PARM_DESC(fbdev_emulation,
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
#endif
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 0c4eb4a9ab31..51e06defc8d8 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -104,6 +104,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
int drm_sysfs_connector_add(struct drm_connector *connector);
void drm_sysfs_connector_remove(struct drm_connector *connector);
+void drm_sysfs_lease_event(struct drm_device *dev);
+
/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 24a177ea5417..c61680ad962d 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -296,7 +296,7 @@ void drm_lease_destroy(struct drm_master *master)
if (master->lessor) {
/* Tell the master to check the lessee list */
- drm_sysfs_hotplug_event(dev);
+ drm_sysfs_lease_event(dev);
drm_master_put(&master->lessor);
}
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index b3c1daad1169..ecb7b33002bb 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
connector->kdev = NULL;
}
+void drm_sysfs_lease_event(struct drm_device *dev)
+{
+ char *event_string = "LEASE=1";
+ char *envp[] = { event_string, NULL };
+
+ DRM_DEBUG("generating lease event\n");
+
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+
/**
* drm_sysfs_hotplug_event - generate a DRM uevent
* @dev: DRM device
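Userspace that watches DRM uevents can now tell lease teardown apart from hotplug by matching the LEASE=1 property. A sketch of a consumer, assuming the standard libudev API (error handling elided; whether receive blocks depends on how the fd is polled):

    #include <libudev.h>
    #include <string.h>

    struct udev *udev = udev_new();
    struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");

    udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
    udev_monitor_enable_receiving(mon);

    struct udev_device *dev = udev_monitor_receive_device(mon);
    const char *lease = udev_device_get_property_value(dev, "LEASE");

    if (lease && !strcmp(lease, "1"))
            /* a lease was destroyed: rescan the lessee list */;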
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 481896fb712a..85e6736f0a32 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->bpp = skl_pixel_formats[fmt].bpp;
plane->drm_format = skl_pixel_formats[fmt].drm_format;
} else {
- plane->tiled = !!(val & DISPPLANE_TILED);
+ plane->tiled = val & DISPPLANE_TILED;
fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
plane->bpp = bdw_pixel_formats[fmt].bpp;
plane->drm_format = bdw_pixel_formats[fmt].drm_format;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ffdbbac4400e..47062ee979cf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1444,6 +1444,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
+ intel_gt_init_workarounds(dev_priv);
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9102571e9692..872a2e159a5f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -67,6 +67,7 @@
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
+#include "intel_workarounds.h"
#include "intel_uc.h"
#include "i915_gem.h"
@@ -1805,6 +1806,7 @@ struct drm_i915_private {
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
struct i915_workarounds workarounds;
+ struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking;
@@ -2148,6 +2150,8 @@ struct drm_i915_private {
struct delayed_work idle_work;
ktime_t last_init_time;
+
+ struct i915_vma *scratch;
} gt;
/* perform PHY state sanity checks? */
@@ -3870,4 +3874,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+ return i915_ggtt_offset(i915->gt.scratch);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b..6ae9a6080cc8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5305,7 +5305,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
}
}
- intel_gt_workarounds_apply(dev_priv);
+ intel_gt_apply_workarounds(dev_priv);
i915_gem_init_swizzling(dev_priv);
@@ -5500,6 +5500,44 @@ err_active:
goto out_ctx;
}
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (!obj)
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ i915->gt.scratch = vma;
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+ i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -5546,12 +5584,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_scratch(dev_priv,
+ IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
}
+ ret = i915_gem_contexts_init(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_scratch;
+ }
+
ret = intel_engines_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@@ -5624,6 +5669,8 @@ err_pm:
err_context:
if (ret != -EIO)
i915_gem_contexts_fini(dev_priv);
+err_scratch:
+ i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5675,8 +5722,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
+ i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_wa_list_free(&dev_priv->gt_wa_list);
+
intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv);
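The init path above gains an err_scratch label so the unwind ladder keeps mirroring init order: each stage undoes only what already succeeded, and i915_gem_fini() releases the scratch page at the matching reverse position. Generic shape of the pattern, with illustrative names:

    int init(void)
    {
            int ret;

            ret = init_scratch();
            if (ret)
                    goto err_ggtt;
            ret = init_contexts();
            if (ret)
                    goto err_scratch;       /* undo only what succeeded */
            return 0;

    err_scratch:
            fini_scratch();
    err_ggtt:
            /* ggtt cleanup happens further down the ladder in the real code */
            return ret;
    }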
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d4fac09095f8..1aaccbe7e1de 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
- len = 6;
+ len = 3;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
-
- /* And again for good measure (blb/pnv) */
- *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *batch++ = addr;
- *batch++ = target_offset;
}
goto out;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3eb33e000d6f..db4128d6c09b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1495,7 +1495,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
i915_error_object_create(i915,
- engine->scratch);
+ i915->gt.scratch);
request_record_user_bo(request, ee);
ee->ctx =
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..76b5f94ea6cb 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -490,46 +490,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- WARN_ON(engine->scratch);
-
- obj = i915_gem_object_create_stolen(engine->i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(engine->i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
-
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- engine->scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
- i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
static void cleanup_status_page(struct intel_engine_cs *engine)
{
if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -704,8 +664,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- intel_engine_cleanup_scratch(engine);
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -720,6 +678,8 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
__intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
+
+ intel_wa_list_free(&engine->wa_list);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 37c94a54efcb..58d1d3d47dd3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -442,8 +442,13 @@ static u64 execlists_update_context(struct i915_request *rq)
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
+ *
+ * Furthermore, Braswell, at least, wants a full mb to be sure that
+ * the writes are coherent in memory (visible to the GPU) prior to
+ * execution, and not just visible to other CPUs (as is the result of
+ * wmb).
*/
- wmb();
+ mb();
return ce->lrc_desc;
}
@@ -1443,9 +1448,10 @@ static int execlists_request_alloc(struct i915_request *request)
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
+ /* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1459,7 +1465,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
return batch;
@@ -1496,7 +1502,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch) +
+ i915_scratch_offset(engine->i915) +
2 * CACHELINE_BYTES);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1573,7 +1579,7 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch)
+ i915_scratch_offset(engine->i915)
+ 2 * CACHELINE_BYTES);
}
@@ -1793,6 +1799,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
+ intel_engine_apply_workarounds(engine);
+
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -2139,7 +2147,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
- i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2476,10 +2484,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- goto err_cleanup_common;
-
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -2491,11 +2495,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- return 0;
+ intel_engine_init_workarounds(engine);
-err_cleanup_common:
- intel_engine_cleanup_common(engine);
- return ret;
+ return 0;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
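The wmb()-to-mb() change above is the subtle one: wmb() orders the CPU's stores against each other, but per the new comment that is not enough on Braswell to guarantee the context-image writes are visible to the GPU (not just to other CPUs) before the subsequent submit; a full mb() is needed first. Ordering sketch, where elsp_submit() is a hypothetical stand-in for the real submission write:

    ce->lrc_reg_state[CTX_RING_TAIL + 1] = tail;    /* context image store */
    mb();   /* make the store coherent in memory, visible to the device */
    elsp_submit(desc);      /* only now kick execution from that image */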
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 187bb0ceb4ac..1f8d2a66c791 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
+ unsigned int num_store_dw;
u32 cmd, *cs;
cmd = MI_FLUSH;
-
+ num_store_dw = 0;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
+ if (mode & EMIT_FLUSH)
+ num_store_dw = 4;
- cs = intel_ring_begin(rq, 2);
+ cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
- *cs++ = MI_NOOP;
+ while (num_store_dw--) {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = 0;
+ }
+ *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+
intel_ring_advance(rq, cs);
return 0;
@@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
@@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@@ -971,7 +975,7 @@ i965_emit_bb_start(struct i915_request *rq,
}
/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
@@ -979,7 +983,9 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+ u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+ GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1437,7 +1443,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
- unsigned int size;
int err;
intel_engine_setup_common(engine);
@@ -1462,21 +1467,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
- size = PAGE_SIZE;
- if (HAS_BROKEN_CS_TLB(engine->i915))
- size = I830_WA_SIZE;
- err = intel_engine_create_scratch(engine, size);
- if (err)
- goto err_unpin;
-
err = intel_engine_init_common(engine);
if (err)
- goto err_scratch;
+ goto err_unpin;
return 0;
-err_scratch:
- intel_engine_cleanup_scratch(engine);
err_unpin:
intel_ring_unpin(ring);
err_ring:
@@ -1550,7 +1546,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -1659,7 +1655,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..767a7192c969 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -15,6 +15,7 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
struct drm_printer;
struct i915_sched_attr;
@@ -440,7 +441,7 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- struct i915_vma *scratch;
+ struct i915_wa_list wa_list;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -898,10 +899,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4bcdeaf8d98f..6e580891db96 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,6 +48,20 @@
* - Public functions to init or apply the given workaround type.
*/
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
+{
+ wal->name = name;
+}
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+ if (!wal->count)
+ return;
+
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+ wal->count, wal->name);
+}
+
static void wa_add(struct drm_i915_private *i915,
i915_reg_t reg, const u32 mask, const u32 val)
{
@@ -580,160 +594,175 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
return 0;
}
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wal_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+ const unsigned int grow = 1 << 4;
+
+ GEM_BUG_ON(!is_power_of_2(grow));
+
+ if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+ struct i915_wa *list;
+
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ GFP_KERNEL);
+ if (!list) {
+ DRM_ERROR("No space for workaround init!\n");
+ return;
+ }
+
+ if (wal->list)
+ memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+ wal->list = list;
+ }
+
+ wal->list[wal->count++] = *wa;
+}
+
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = val,
+ .val = _MASKED_BIT_ENABLE(val)
+ };
+
+ wal_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ wal_add(wal, &wa);
}
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ wa_write_masked_or(wal, reg, ~0, val);
}
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+ wa_write_masked_or(wal, reg, val, val);
+}
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+ struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
+ if (!IS_COFFEELAKE(i915))
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ ECOCHK_DIS_TLB);
- if (HAS_LLC(dev_priv)) {
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- I915_WRITE(MMCD_MISC_CTRL,
- I915_READ(MMCD_MISC_CTRL) |
- MMCD_PCLA |
- MMCD_HOTSPOT_EN);
+ wa_write_or(wal,
+ MMCD_MISC_CTRL,
+ MMCD_PCLA | MMCD_HOTSPOT_EN);
}
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
- if (IS_GEN9_LP(dev_priv)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
-
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ BDW_DISABLE_HDC_INVALIDATION);
}
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:skl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+ gen9_gt_workarounds_init(i915);
/* WaInPlaceDecompressionHang:bxt */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+ if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- for_each_engine(engine, dev_priv, tmp) {
- if (engine->id == RCS)
- continue;
-
- I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
- }
- }
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ gen9_gt_workarounds_init(i915);
}
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:cfl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:cfl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void wa_init_mcr(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
- u32 mcr;
+ struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask;
/*
@@ -770,8 +799,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- mcr = I915_READ(GEN8_MCR_SELECTOR);
-
if (INTEL_GEN(dev_priv) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
@@ -789,148 +816,170 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
*/
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
- I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+ wa_write_masked_or(wal,
+ GEN8_MCR_SELECTOR,
+ mcr_slice_subslice_mask,
+ intel_calculate_mcr_s_ss_select(dev_priv));
}
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_mcr(i915);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaInPlaceDecompressionHang:cnl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaEnablePreemptionGranularityControlByUMD:cnl */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* This is not an Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+ wa_init_mcr(i915);
/* WaInPlaceDecompressionHang:icl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaPipelineFlushCoherentLines:icl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* Wa_1405543622:icl
- * Formerly known as WaGAPZPriorityScheme
- */
- I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
- GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
- /* Wa_1604223664:icl
- * Formerly known as WaL3BankAddressHashing
- */
- I915_WRITE(GEN8_GARBCNTL,
- (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
- GEN11_HASH_CTRL_EXCL_BIT0);
- I915_WRITE(GEN11_GLBLINVL,
- (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- I915_WRITE(GEN11_GACB_PERF_CTRL,
- (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
- /* Wa_1405733216:icl
- * Formerly known as WaDisableCleanEvicts
- */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_masked_or(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
*/
- I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
- GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
- GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+ wa_write_or(wal,
+ GEN11_LSN_UNSLCVC,
+ GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl
* Formerly known as WaDisCtxReload
*/
- I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
- GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+ wa_write_or(wal,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1405779004:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
- MSCUNIT_CLKGATE_DIS);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ MSCUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl */
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
- GWUNIT_CLKGATE_DIS);
-
- /* Wa_1604302699:icl */
- I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
- I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
- GEN11_I2M_WRITE_DISABLE);
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
/* Wa_1406838659:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
- I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
- I915_READ(INF_UNIT_LEVEL_CLKGATE) |
- CGPSF_CLKGATE_DIS);
-
- /* WaForwardProgressSoftReset:icl */
- I915_WRITE(GEN10_SCRATCH_LNCF2,
- I915_READ(GEN10_SCRATCH_LNCF2) |
- PMFLUSHDONE_LNICRSDROP |
- PMFLUSH_GAPL3UNBLOCK |
- PMFLUSHDONE_LNEBLK);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ INF_UNIT_LEVEL_CLKGATE,
+ CGPSF_CLKGATE_DIS);
/* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError
*/
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
- if (INTEL_GEN(dev_priv) < 8)
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_start(wal, "GT");
+
+ if (INTEL_GEN(i915) < 8)
return;
- else if (IS_BROADWELL(dev_priv))
- bdw_gt_workarounds_apply(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_gt_workarounds_apply(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- skl_gt_workarounds_apply(dev_priv);
- else if (IS_BROXTON(dev_priv))
- bxt_gt_workarounds_apply(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- kbl_gt_workarounds_apply(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- glk_gt_workarounds_apply(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- cfl_gt_workarounds_apply(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_gt_workarounds_apply(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- icl_gt_workarounds_apply(dev_priv);
+ else if (IS_BROADWELL(i915))
+ return;
+ else if (IS_CHERRYVIEW(i915))
+ return;
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_gt_workarounds_init(i915);
+ else if (IS_ICELAKE(i915))
+ icl_gt_workarounds_init(i915);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(i915));
+
+ wa_init_finish(wal);
+}
+
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw = 0;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ wa->reg,
+ FW_REG_READ |
+ FW_REG_WRITE);
+
+ return fw;
+}
+
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw;
+ unsigned long flags;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ if (!wal->count)
+ return;
+
+ fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 val = I915_READ_FW(wa->reg);
+
+ val &= ~wa->mask;
+ val |= wa->val;
+
+ I915_WRITE_FW(wa->reg, val);
+ }
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+ wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
}
struct whitelist {
@@ -1077,6 +1126,146 @@ void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
whitelist_apply(engine, whitelist_build(engine, &w));
}
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (IS_ICELAKE(i915)) {
+ /* This is not a Wa. Enable for better image quality */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+ /* WaPipelineFlushCoherentLines:icl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /*
+ * Wa_1405543622:icl
+ * Formerly known as WaGAPZPriorityScheme
+ */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+ /*
+ * Wa_1604223664:icl
+ * Formerly known as WaL3BankAddressHashing
+ */
+ wa_write_masked_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_masked_or(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+
+ /*
+ * Wa_1405733216:icl
+ * Formerly known as WaDisableCleanEvicts
+ */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+ /* Wa_1604302699:icl */
+ wa_write_or(wal,
+ GEN10_L3_CHICKEN_MODE_REGISTER,
+ GEN11_I2M_WRITE_DISABLE);
+
+ /* WaForwardProgressSoftReset:icl */
+ wa_write_or(wal,
+ GEN10_SCRATCH_LNCF2,
+ PMFLUSHDONE_LNICRSDROP |
+ PMFLUSH_GAPL3UNBLOCK |
+ PMFLUSHDONE_LNEBLK);
+ }
+
+ if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ wa_masked_en(wal,
+ GEN7_FF_SLICE_CS_CHICKEN1,
+ GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN9_GAPS_TSV_CREDIT_DISABLE);
+ }
+
+ if (IS_BROXTON(i915)) {
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ wa_masked_en(wal,
+ FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ if (IS_GEN9(i915)) {
+ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal,
+ GEN9_CSFE_CHICKEN1_RCS,
+ GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+ wa_write_or(wal,
+ BDW_SCRATCH1,
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(i915))
+ wa_write_masked_or(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+ }
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ wa_write(wal,
+ RING_SEMA_WAIT_POLL(engine->mmio_base),
+ 1);
+ }
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ return;
+
+ wa_init_start(wal, engine->name);
+
+ if (engine->id == RCS)
+ rcs_engine_wa_init(engine);
+ else
+ xcs_engine_wa_init(engine);
+
+ wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+ wa_list_apply(engine->i915, &engine->wa_list);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e626..979695a53964 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,35 @@
#ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_
+#include <linux/slab.h>
+
+struct i915_wa {
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+};
+
+struct i915_wa_list {
+ const char *name;
+ struct i915_wa *list;
+ unsigned int count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+ kfree(wal->list);
+ memset(wal, 0, sizeof(*wal));
+}
+
int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
int intel_ctx_workarounds_emit(struct i915_request *rq);
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
+
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 66df1b177959..27b507eb4a99 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
dsi->encoder.possible_crtcs = 1;
/* If there's a bridge, attach to it and let it create the connector */
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
- if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
-
+ if (dsi->bridge) {
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to attach bridge to drm\n");
+ goto err_encoder_cleanup;
+ }
+ } else {
/* Otherwise create our own connector and attach to a panel */
ret = mtk_dsi_create_connector(drm, dsi);
if (ret)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d4530d60767b..ca169f013a14 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1594,7 +1594,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
NULL);
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
- plane->crtc = crtc;
/* save user friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 96cdf06e7da2..d31d8281424e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -488,8 +488,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
drm_encoder_cleanup(drm_enc);
mutex_destroy(&dpu_enc->enc_lock);
-
- kfree(dpu_enc);
}
void dpu_encoder_helper_split_config(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index bfcd165e96df..d743e7ca6a3c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -216,7 +216,7 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, 0,
+ false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 4c03f0b7343e..41bec570c518 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -39,6 +39,8 @@
#define DSI_PIXEL_PLL_CLK 1
#define NUM_PROVIDED_CLKS 2
+#define VCO_REF_CLK_RATE 19200000
+
struct dsi_pll_regs {
u32 pll_prop_gain_rate;
u32 pll_lockdet_rate;
@@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
parent_rate);
pll_10nm->vco_current_rate = rate;
- pll_10nm->vco_ref_clk_rate = parent_rate;
+ pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
dsi_pll_setup_config(pll_10nm);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index c79659ca5706..adbdce3aeda0 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -332,6 +332,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
+ ret = msm_hdmi_hpd_enable(hdmi->connector);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
+ goto fail;
+ }
+
encoder->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
@@ -571,7 +577,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
- static struct hdmi_platform_config *hdmi_cfg;
+ struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
int i, err;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index accc9a61611d..5c5df6ab2a57 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
void msm_hdmi_connector_irq(struct drm_connector *connector);
struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
+int msm_hdmi_hpd_enable(struct drm_connector *connector);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index e9c9a0af508e..30e908dfded7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
}
}
-static int hpd_enable(struct hdmi_connector *hdmi_connector)
+int msm_hdmi_hpd_enable(struct drm_connector *connector)
{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
@@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
{
struct drm_connector *connector = NULL;
struct hdmi_connector *hdmi_connector;
- int ret;
hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
if (!hdmi_connector)
@@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = hpd_enable(hdmi_connector);
- if (ret) {
- dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- return ERR_PTR(ret);
- }
-
drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 4bcdeca7479d..2088a20eb270 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -34,7 +34,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
if (!new_crtc_state->active)
continue;
+ if (drm_crtc_vblank_get(crtc))
+ continue;
+
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+
+ drm_crtc_vblank_put(crtc);
}
}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index f0da0d3c8a80..d756436c1fcd 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
@@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
if (IS_ERR(show_priv->state)) {
ret = PTR_ERR(show_priv->state);
- kfree(show_priv);
- return ret;
+ goto free_priv;
}
show_priv->dev = dev;
- return single_open(file, msm_gpu_show, show_priv);
+ ret = single_open(file, msm_gpu_show, show_priv);
+ if (ret)
+ goto free_priv;
+
+ return 0;
+
+free_priv:
+ kfree(show_priv);
+ return ret;
}
static const struct file_operations msm_gpu_fops = {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 4904d0d41409..dcff812c63d0 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -553,17 +553,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
- ret = sched_setscheduler(priv->disp_thread[i].thread,
- SCHED_FIFO, &param);
- if (ret)
- pr_warn("display thread priority update failed: %d\n",
- ret);
-
if (IS_ERR(priv->disp_thread[i].thread)) {
dev_err(dev, "failed to create crtc_commit kthread\n");
priv->disp_thread[i].thread = NULL;
+ goto err_msm_uninit;
}
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ dev_warn(dev, "disp_thread set priority failed: %d\n",
+ ret);
+
/* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker);
@@ -572,6 +573,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
+ if (IS_ERR(priv->event_thread[i].thread)) {
+ dev_err(dev, "failed to create crtc_event kthread\n");
+ priv->event_thread[i].thread = NULL;
+ goto err_msm_uninit;
+ }
+
/**
* event thread should also run at same priority as disp_thread
* because it is handling frame_done events. A lower priority
@@ -580,34 +587,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
* failure at crtc commit level.
*/
ret = sched_setscheduler(priv->event_thread[i].thread,
- SCHED_FIFO, &param);
+ SCHED_FIFO, &param);
if (ret)
- pr_warn("display event thread priority update failed: %d\n",
- ret);
-
- if (IS_ERR(priv->event_thread[i].thread)) {
- dev_err(dev, "failed to create crtc_event kthread\n");
- priv->event_thread[i].thread = NULL;
- }
-
- if ((!priv->disp_thread[i].thread) ||
- !priv->event_thread[i].thread) {
- /* clean up previously created threads if any */
- for ( ; i >= 0; i--) {
- if (priv->disp_thread[i].thread) {
- kthread_stop(
- priv->disp_thread[i].thread);
- priv->disp_thread[i].thread = NULL;
- }
-
- if (priv->event_thread[i].thread) {
- kthread_stop(
- priv->event_thread[i].thread);
- priv->event_thread[i].thread = NULL;
- }
- }
- goto err_msm_uninit;
- }
+ dev_warn(dev, "event_thread set priority failed: %d\n",
+ ret);
}
ret = drm_vblank_init(ddev, priv->num_crtcs);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 7a7923e6220d..6942604ad9a8 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -317,6 +317,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
uint32_t *ptr;
int ret = 0;
+ if (!nr_relocs)
+ return 0;
+
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
@@ -410,7 +413,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
- struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
@@ -443,6 +445,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
ring = gpu->rb[queue->prio];
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence)
@@ -452,11 +456,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
- if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
+ ret = 0;
+ if (!dma_fence_match_context(in_fence, ring->fctx->context))
ret = dma_fence_wait(in_fence, true);
- if (ret)
- return ret;
- }
+
+ dma_fence_put(in_fence);
+ if (ret)
+ return ret;
}
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -582,8 +588,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
out:
- if (in_fence)
- dma_fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 11aac8337066..2b7c8946adba 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -345,6 +345,10 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
{
struct msm_gpu_state *state;
+ /* Check if the target supports capturing crash state */
+ if (!gpu->funcs->gpu_state_get)
+ return;
+
/* Only save one crash state at a time */
if (gpu->crashstate)
return;
@@ -434,10 +438,9 @@ static void recover_worker(struct work_struct *work)
if (submit) {
struct task_struct *task;
- rcu_read_lock();
- task = pid_task(submit->pid, PIDTYPE_PID);
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- comm = kstrdup(task->comm, GFP_ATOMIC);
+ comm = kstrdup(task->comm, GFP_KERNEL);
/*
* So slightly annoying, in other paths like
@@ -450,10 +453,10 @@ static void recover_worker(struct work_struct *work)
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
- cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
+ cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ put_task_struct(task);
mutex_lock(&dev->struct_mutex);
}
- rcu_read_unlock();
if (comm && cmd) {
dev_err(dev->dev, "%s: offending task: %s (%s)\n",
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index b23d33622f37..2a90aa4caec0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
// pm_runtime_put_sync(mmu->dev);
- WARN_ON(ret < 0);
+ WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index cca933458439..0c2c8d2c631f 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd,
uint64_t iova, uint32_t size)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
+ unsigned offset = 0;
const char *buf;
if (iova) {
- buf += iova - submit->bos[idx].iova;
+ offset = iova - submit->bos[idx].iova;
} else {
iova = submit->bos[idx].iova;
size = obj->base.size;
@@ -340,6 +341,8 @@ static void snapshot_buf(struct msm_rd_state *rd,
if (IS_ERR(buf))
return;
+ buf += offset;
+
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr(&obj->base);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6cbbae3f438b..db1bf7f88c1f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
+static void
+evo_flush(struct nv50_dmac *dmac)
+{
+ /* Push buffer fetches are not coherent with BAR1, so we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+}
+
u32 *
evo_wait(struct nv50_dmac *evoc, int nr)
{
@@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
if (nvif_msec(device, 2000,
@@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
- /* Push buffer fetches are not coherent with BAR1, we need to ensure
- * writes have been flushed right through to VRAM before writing PUT.
- */
- if (dmac->push.type & NVIF_MEM_VRAM) {
- struct nvif_device *device = dmac->base.device;
- nvif_wr32(&device->object, 0x070000, 0x00000001);
- nvif_msec(device, 2000,
- if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
- break;
- );
- }
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
@@ -1264,6 +1271,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
{
struct nv50_mstm *mstm = *pmstm;
if (mstm) {
+ drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
kfree(*pmstm);
*pmstm = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2b2baf6e0e0d..d2928d43f29a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1171,10 +1171,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
goto err_free;
}
+ err = nouveau_drm_device_init(drm);
+ if (err)
+ goto err_put;
+
platform_set_drvdata(pdev, drm);
return drm;
+err_put:
+ drm_dev_put(drm);
err_free:
nvkm_device_del(pdevice);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 1f8161b041be..465120809eb3 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -177,6 +177,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 0a485c5b982e..00a9c2ab9e6c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5418,9 +5418,15 @@ static int dsi_probe(struct platform_device *pdev)
dsi->num_lanes_supported = 3;
}
+ r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (r) {
+ DSSERR("Failed to populate DSI child devices: %d\n", r);
+ goto err_pm_disable;
+ }
+
r = dsi_init_output(dsi);
if (r)
- goto err_pm_disable;
+ goto err_of_depopulate;
r = dsi_probe_of(dsi);
if (r) {
@@ -5428,22 +5434,16 @@ static int dsi_probe(struct platform_device *pdev)
goto err_uninit_output;
}
- r = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (r) {
- DSSERR("Failed to populate DSI child devices: %d\n", r);
- goto err_uninit_output;
- }
-
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
- goto err_of_depopulate;
+ goto err_uninit_output;
return 0;
-err_of_depopulate:
- of_platform_depopulate(dev);
err_uninit_output:
dsi_uninit_output(dsi);
+err_of_depopulate:
+ of_platform_depopulate(dev);
err_pm_disable:
pm_runtime_disable(dev);
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 1f698a95a94a..33e15cb77efa 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -432,7 +432,7 @@ struct omap_dss_device {
const struct omap_dss_driver *driver;
const struct omap_dss_device_ops *ops;
unsigned long ops_flags;
- unsigned long bus_flags;
+ u32 bus_flags;
/* helper variable for driver suspend/resume */
bool activate_after_resume;
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 452e625f6ce3..933ebc9f9faa 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -52,17 +52,44 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
+static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_connector *connector;
+ bool hdmi_mode;
+
+ hdmi_mode = false;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ hdmi_mode = omap_connector_get_hdmi_mode(connector);
+ break;
+ }
+ }
+
+ if (dssdev->ops->hdmi.set_hdmi_mode)
+ dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
+
+ if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
+ struct hdmi_avi_infoframe avi;
+ int r;
+
+ r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
+ false);
+ if (r == 0)
+ dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
+ }
+}
+
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_connector *connector;
struct omap_dss_device *dssdev;
struct videomode vm = { 0 };
- bool hdmi_mode;
- int r;
drm_display_mode_to_videomode(adjusted_mode, &vm);
@@ -112,27 +139,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
}
/* Set the HDMI mode and HDMI infoframe if applicable. */
- hdmi_mode = false;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
- break;
- }
- }
-
- dssdev = omap_encoder->output;
-
- if (dssdev->ops->hdmi.set_hdmi_mode)
- dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
- if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
- struct hdmi_avi_infoframe avi;
-
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
- false);
- if (r == 0)
- dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
- }
+ if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
+ omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 941f35233b1f..5864cb452c5c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
- rockchip_drm_platform_remove(pdev);
-}
-
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
- .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ba80150d1052..895d77d799e4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (!fbo)
return -ENOMEM;
- ttm_bo_get(bo);
fbo->base = *bo;
+ fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
+
+ ttm_bo_get(bo);
fbo->bo = bo;
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 61a84b958d67..d7a2dfb8ee9b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -49,6 +49,8 @@
#define VMWGFX_REPO "In Tree"
+#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
+
/**
* Fully encoded drm commands. Might move to vmw_drm.h
@@ -918,7 +920,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_unlock(&dev_priv->cap_lock);
}
-
+ vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 59f614225bcd..aca974b14b55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -606,6 +606,9 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+ /* Validation memory reservation */
+ struct vmw_validation_mem vvm;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -846,6 +849,8 @@ extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+ size_t gran);
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5a6b70ba137a..f2d13a72c05d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
void *buf)
{
struct vmw_buffer_object *vmw_bo;
- int ret;
struct {
uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr,
&vmw_bo);
- return ret;
}
@@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
+ vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 7b1e5a5cbd2c..f88247046721 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -96,3 +96,39 @@ void vmw_ttm_global_release(struct vmw_private *dev_priv)
drm_global_item_unref(&dev_priv->bo_global_ref.ref);
drm_global_item_unref(&dev_priv->mem_global_ref);
}
+
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
+{
+ static struct ttm_operation_ctx ctx = {.interruptible = false,
+ .no_wait_gpu = false};
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
+}
+
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
+{
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
+
+ ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
+}
+
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. We take a struct vmw_private
+ * rather than a struct vmw_validation_mem to guarantee that the assumption
+ * made by the callbacks, namely that struct vmw_validation_mem is embedded
+ * in a struct vmw_private, holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
+{
+ struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+ vvm->reserve_mem = vmw_vmt_reserve;
+ vvm->unreserve_mem = vmw_vmt_unreserve;
+ vvm->gran = gran;
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 184025fa938e..f116f092e00b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ struct page *page;
+ if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+ int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+ if (ret)
+ return NULL;
+
+ ctx->vm_size_left += ctx->vm->gran;
+ ctx->total_mem += ctx->vm->gran;
+ }
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
+ if (ctx->vm)
+ ctx->vm_size_left -= PAGE_SIZE;
+
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
+ if (ctx->vm && ctx->total_mem) {
+ ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+ ctx->total_mem = 0;
+ ctx->vm_size_left = 0;
+ }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index b57e3292c386..3b396fea40d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -34,6 +34,21 @@
#include <drm/ttm/ttm_execbuf_util.h>
/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint of how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservations to be done in large batches.
+ */
+struct vmw_validation_mem {
+ int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+ void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+ size_t gran;
+};
+
+/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
@@ -47,6 +62,10 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that so far has not been allocated.
+ * @total_mem: Amount of reserved memory.
*/
struct vmw_validation_context {
struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
+ struct vmw_validation_mem *vm;
+ size_t vm_size_left;
+ size_t total_mem;
};
struct vmw_buffer_object;
@@ -102,6 +124,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+ struct vmw_validation_mem *vm)
+{
+ ctx->vm = vm;
+}
+
+/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding