author	Daniel Vetter <daniel.vetter@ffwll.ch>	2017-02-26 21:34:42 +0100
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2017-02-26 21:34:42 +0100
commit	8e22e1b3499a446df48c2b26667ca36c55bf864c (patch)
tree	5329f98b3eb3c95a9dcbab0fa4f9b6e62f0e788d /drivers/gpu
parent	00d3c14f14d51babd8aeafd5fa734ccf04f5ca3d (diff)
parent	64a577196d66b44e37384bc5c4d78c61f59d5b2a (diff)
Merge airlied/drm-next into drm-misc-next
Backmerge the main pull request to sync up with all the newly landed
drivers. Otherwise we'll have chaos even before 4.12 started in
earnest.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/gpu')
344 files changed, 14886 insertions, 5070 deletions
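
The most repeated amdgpu change in this pull is the replacement of amdgpu_card_posted() with amdgpu_need_post(): the predicate's polarity is inverted (callers previously wrote !amdgpu_card_posted()), and a new adev->has_hw_reset flag, latched by the PCI-config reset path in cik.c, forces exactly one repost after a hardware reset. Below is a minimal standalone model of that logic — struct dev_model, need_post() and the stubbed register field are illustrative stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>

struct dev_model {
	bool has_hw_reset;   /* latched by the PCI-config reset path */
	unsigned mem_size;   /* stands in for RREG32(mmCONFIG_MEMSIZE) */
};

static bool need_post(struct dev_model *d)
{
	if (d->has_hw_reset) {
		d->has_hw_reset = false;   /* one-shot: consume the latch */
		return true;
	}
	/* a nonzero MEM_SIZE means the VBIOS already posted the card */
	return d->mem_size == 0;
}

int main(void)
{
	struct dev_model d = { .has_hw_reset = true, .mem_size = 256 };
	int first = need_post(&d);    /* 1: reset latched, repost required */
	int second = need_post(&d);   /* 0: latch consumed, MEM_SIZE is set */
	printf("%d %d\n", first, second);
	return 0;
}

The same inversion shows up in amdgpu_device_resume() in the diff below, where the resume path now reposts whenever amdgpu_need_post() says so, instead of checking !amdgpu_card_posted(adev) || !resume.
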
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 90bc65d07a35..88e01e08e279 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -263,6 +263,8 @@ source "drivers/gpu/drm/mxsfb/Kconfig" source "drivers/gpu/drm/meson/Kconfig" +source "drivers/gpu/drm/tinydrm/Kconfig" + # Keep legacy drivers last menuconfig DRM_LEGACY diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 92de3991fa56..3ee95793d122 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -94,3 +94,4 @@ obj-$(CONFIG_DRM_ARCPGU)+= arc/ obj-y += hisilicon/ obj-$(CONFIG_DRM_ZTE) += zte/ obj-$(CONFIG_DRM_MXSFB) += mxsfb/ +obj-$(CONFIG_DRM_TINYDRM) += tinydrm/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 94a64e3bc682..c1b913541739 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1037,7 +1037,6 @@ struct amdgpu_uvd { bool use_ctx_buf; struct amd_sched_entity entity; uint32_t srbm_soft_reset; - bool is_powergated; }; /* @@ -1066,7 +1065,6 @@ struct amdgpu_vce { struct amd_sched_entity entity; uint32_t srbm_soft_reset; unsigned num_rings; - bool is_powergated; }; /* @@ -1484,6 +1482,9 @@ struct amdgpu_device { spinlock_t gtt_list_lock; struct list_head gtt_list; + /* record hw reset is performed */ + bool has_hw_reset; + }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -1702,13 +1703,14 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) int amdgpu_gpu_reset(struct amdgpu_device *adev); bool amdgpu_need_backup(struct amdgpu_device *adev); void amdgpu_pci_config_reset(struct amdgpu_device *adev); -bool amdgpu_card_posted(struct amdgpu_device *adev); +bool amdgpu_need_post(struct amdgpu_device *adev); void amdgpu_update_display_priority(struct amdgpu_device *adev); int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, u32 ip_instance, u32 ring, struct amdgpu_ring **out_ring); +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes); void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index d9def01f276e..821f7cc2051f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -100,7 +100,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev) resource_size_t size = 256 * 1024; /* ??? 
*/ if (!(adev->flags & AMD_IS_APU)) - if (!amdgpu_card_posted(adev)) + if (amdgpu_need_post(adev)) return false; adev->bios = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index a5df1ef306d9..d9e5aa4a79ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -834,32 +834,57 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, case CHIP_TOPAZ: if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) || ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) || - ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) + ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) { + info->is_kicker = true; strcpy(fw_name, "amdgpu/topaz_k_smc.bin"); - else + } else strcpy(fw_name, "amdgpu/topaz_smc.bin"); break; case CHIP_TONGA: if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) || - ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) + ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) { + info->is_kicker = true; strcpy(fw_name, "amdgpu/tonga_k_smc.bin"); - else + } else strcpy(fw_name, "amdgpu/tonga_smc.bin"); break; case CHIP_FIJI: strcpy(fw_name, "amdgpu/fiji_smc.bin"); break; case CHIP_POLARIS11: - if (type == CGS_UCODE_ID_SMU) - strcpy(fw_name, "amdgpu/polaris11_smc.bin"); - else if (type == CGS_UCODE_ID_SMU_SK) + if (type == CGS_UCODE_ID_SMU) { + if (((adev->pdev->device == 0x67ef) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe2) || + (adev->pdev->revision == 0xe5))) || + ((adev->pdev->device == 0x67ff) && + ((adev->pdev->revision == 0xcf) || + (adev->pdev->revision == 0xef) || + (adev->pdev->revision == 0xff)))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris11_k_smc.bin"); + } else + strcpy(fw_name, "amdgpu/polaris11_smc.bin"); + } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin"); + } break; case CHIP_POLARIS10: - if (type == CGS_UCODE_ID_SMU) - strcpy(fw_name, "amdgpu/polaris10_smc.bin"); - else if (type == CGS_UCODE_ID_SMU_SK) + if (type == CGS_UCODE_ID_SMU) { + if ((adev->pdev->device == 0x67df) && + ((adev->pdev->revision == 0xe0) || + (adev->pdev->revision == 0xe3) || + (adev->pdev->revision == 0xe4) || + (adev->pdev->revision == 0xe5) || + (adev->pdev->revision == 0xe7) || + (adev->pdev->revision == 0xef))) { + info->is_kicker = true; + strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); + } else + strcpy(fw_name, "amdgpu/polaris10_smc.bin"); + } else if (type == CGS_UCODE_ID_SMU_SK) { strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin"); + } break; case CHIP_POLARIS12: strcpy(fw_name, "amdgpu/polaris12_smc.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index cf2e8c4e9b8b..d2d0f60ff36d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } break; } + + if (!(*out_ring && (*out_ring)->adev)) { + DRM_ERROR("Ring %d is not initialized on IP %d\n", + ring, ip_type); + return -EINVAL; + } + return 0; } @@ -344,8 +351,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev) * submission. This can result in a debt that can stop buffer migrations * temporarily. 
*/ -static void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, - u64 num_bytes) +void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes) { spin_lock(&adev->mm_stats.lock); adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 944ba0d3874a..6abb238b25c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -619,25 +619,29 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc) * GPU helpers function. */ /** - * amdgpu_card_posted - check if the hw has already been initialized + * amdgpu_need_post - check if the hw need post or not * * @adev: amdgpu_device pointer * - * Check if the asic has been initialized (all asics). - * Used at driver startup. - * Returns true if initialized or false if not. + * Check if the asic has been initialized (all asics) at driver startup + * or post is needed if hw reset is performed. + * Returns true if need or false if not. */ -bool amdgpu_card_posted(struct amdgpu_device *adev) +bool amdgpu_need_post(struct amdgpu_device *adev) { uint32_t reg; + if (adev->has_hw_reset) { + adev->has_hw_reset = false; + return true; + } /* then check MEM_SIZE, in case the crtcs are off */ reg = RREG32(mmCONFIG_MEMSIZE); if (reg) - return true; + return false; - return false; + return true; } @@ -665,7 +669,7 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev) return true; } } - return !amdgpu_card_posted(adev); + return amdgpu_need_post(adev); } /** @@ -2071,7 +2075,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) amdgpu_atombios_scratch_regs_restore(adev); /* post card */ - if (!amdgpu_card_posted(adev) || !resume) { + if (amdgpu_need_post(adev)) { r = amdgpu_atom_asic_init(adev->mode_info.atom_context); if (r) DRM_ERROR("amdgpu asic init failed\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 9bd1b4eae32e..51d759463384 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -487,67 +487,44 @@ static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo) * * @adev: amdgpu_device pointer * @bo_va: bo_va to update + * @list: validation list + * @operation: map or unmap * - * Update the bo_va directly after setting it's address. Errors are not + * Update the bo_va directly after setting its address. Errors are not * vital here, so they are not reported back to userspace. 
*/ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, + struct list_head *list, uint32_t operation) { - struct ttm_validate_buffer tv, *entry; - struct amdgpu_bo_list_entry vm_pd; - struct ww_acquire_ctx ticket; - struct list_head list, duplicates; - int r; - - INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&duplicates); - - tv.bo = &bo_va->bo->tbo; - tv.shared = true; - list_add(&tv.head, &list); - - amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd); + struct ttm_validate_buffer *entry; + int r = -ERESTARTSYS; - /* Provide duplicates to avoid -EALREADY */ - r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); - if (r) - goto error_print; - - list_for_each_entry(entry, &list, head) { + list_for_each_entry(entry, list, head) { struct amdgpu_bo *bo = container_of(entry->bo, struct amdgpu_bo, tbo); - - /* if anything is swapped out don't swap it in here, - just abort and wait for the next CS */ - if (!amdgpu_bo_gpu_accessible(bo)) - goto error_unreserve; - - if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow)) - goto error_unreserve; + if (amdgpu_gem_va_check(NULL, bo)) + goto error; } r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check, NULL); if (r) - goto error_unreserve; + goto error; r = amdgpu_vm_update_page_directory(adev, bo_va->vm); if (r) - goto error_unreserve; + goto error; r = amdgpu_vm_clear_freed(adev, bo_va->vm); if (r) - goto error_unreserve; + goto error; if (operation == AMDGPU_VA_OP_MAP) r = amdgpu_vm_bo_update(adev, bo_va, false); -error_unreserve: - ttm_eu_backoff_reservation(&ticket, &list); - -error_print: +error: if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } @@ -564,7 +541,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct amdgpu_bo_list_entry vm_pd; struct ttm_validate_buffer tv; struct ww_acquire_ctx ticket; - struct list_head list, duplicates; + struct list_head list; uint32_t invalid_flags, va_flags = 0; int r = 0; @@ -602,14 +579,13 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, return -ENOENT; abo = gem_to_amdgpu_bo(gobj); INIT_LIST_HEAD(&list); - INIT_LIST_HEAD(&duplicates); tv.bo = &abo->tbo; - tv.shared = true; + tv.shared = false; list_add(&tv.head, &list); amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); - r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); if (r) { drm_gem_object_unreference_unlocked(gobj); return r; @@ -640,10 +616,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, default: break; } - ttm_eu_backoff_reservation(&ticket, &list); if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug) - amdgpu_gem_va_update_vm(adev, bo_va, args->operation); + amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation); + ttm_eu_backoff_reservation(&ticket, &list); drm_gem_object_unreference_unlocked(gobj); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index d1aa291b2638..be80a4a68d7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -323,6 +323,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, struct amdgpu_bo *bo; enum ttm_bo_type type; unsigned long page_align; + u64 initial_bytes_moved; size_t acc_size; int r; @@ -374,8 +375,10 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, * See https://bugs.freedesktop.org/show_bug.cgi?id=88758 */ +#ifndef CONFIG_COMPILE_TEST #warning Please 
enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \ thanks to write-combining +#endif if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " @@ -399,12 +402,20 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, locked = ww_mutex_trylock(&bo->tbo.ttm_resv.lock); WARN_ON(!locked); } + + initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, !kernel, NULL, acc_size, sg, resv ? resv : &bo->tbo.ttm_resv, &amdgpu_ttm_bo_destroy); - if (unlikely(r != 0)) + amdgpu_cs_report_moved_bytes(adev, + atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved); + + if (unlikely(r != 0)) { + if (!resv) + ww_mutex_unlock(&bo->tbo.resv->lock); return r; + } bo->tbo.priority = ilog2(bo->tbo.num_pages); if (kernel) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index a61882ddc804..346e80a7119b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1142,12 +1142,22 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) /* XXX select vce level based on ring/task */ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; mutex_unlock(&adev->pm.mutex); + amdgpu_pm_compute_clocks(adev); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); } else { + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); mutex_lock(&adev->pm.mutex); adev->pm.dpm.vce_active = false; mutex_unlock(&adev->pm.mutex); + amdgpu_pm_compute_clocks(adev); } - amdgpu_pm_compute_clocks(adev); + } } @@ -1286,7 +1296,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) if (!adev->pm.dpm_enabled) return; - amdgpu_display_bandwidth_update(adev); + if (adev->mode_info.num_crtc) + amdgpu_display_bandwidth_update(adev); for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 1154b0a8881d..4c6094eefc51 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -529,6 +529,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_ case TTM_PL_TT: break; case TTM_PL_VRAM: + if (mem->start == AMDGPU_BO_INVALID_OFFSET) + return -EINVAL; + mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 6f62ac473064..6d6ab7f11b4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1113,6 +1113,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) amdgpu_dpm_enable_uvd(adev, false); } else { amdgpu_asic_set_uvd_clocks(adev, 0, 0); + /* shutdown the UVD block */ + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); } } else { schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); @@ -1129,6 +1134,10 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) amdgpu_dpm_enable_uvd(adev, true); } else { 
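/* Editor's note, not part of the patch: the UNGATE sequence added below
 * mirrors, in reverse, the GATE sequence added to the idle worker above —
 * idling powergates the UVD block first and gates clocks last, while
 * bringing it back up ungates clocks first and releases powergating last.
 * The VCE hooks in amdgpu_vce.c follow the same pattern. */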
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 79bc9c7aad45..e2c06780ce49 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -321,6 +321,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) amdgpu_dpm_enable_vce(adev, false); } else { amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); } } else { schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT); @@ -346,6 +350,11 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring) amdgpu_dpm_enable_vce(adev, true); } else { amdgpu_asic_set_vce_clocks(adev, 53300, 40000); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + } } mutex_unlock(&adev->vce.idle_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 3fd951c71d1b..dcfb7df3caf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -83,7 +83,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm) DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r); amdgpu_vm_bo_rmv(adev, bo_va); ttm_eu_backoff_reservation(&ticket, &list); - kfree(bo_va); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 9498e78b90d7..f97ecb49972e 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -2210,7 +2210,6 @@ static void ci_clear_vc(struct amdgpu_device *adev) static int ci_upload_firmware(struct amdgpu_device *adev) { - struct ci_power_info *pi = ci_get_pi(adev); int i, ret; if (amdgpu_ci_is_smc_running(adev)) { @@ -2227,7 +2226,7 @@ static int ci_upload_firmware(struct amdgpu_device *adev) amdgpu_ci_stop_smc_clock(adev); amdgpu_ci_reset_smc(adev); - ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end); + ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END); return ret; @@ -4257,12 +4256,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev, if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) { if (amdgpu_new_state->evclk) { - /* turn the clocks on when encoding */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - if (ret) - return ret; - pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev); tmp = RREG32_SMC(ixDPM_TABLE_475); tmp &= ~DPM_TABLE_475__VceBootLevel_MASK; @@ -4274,9 +4267,6 @@ static int ci_update_vce_dpm(struct amdgpu_device *adev, ret = ci_enable_vce_dpm(adev, false); if (ret) return ret; - /* turn the clocks off when not encoding */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); } } return ret; @@ -6278,13 +6268,13 @@ static int ci_dpm_sw_init(void *handle) adev->pm.current_mclk = adev->clock.default_mclk; adev->pm.int_thermal_type = THERMAL_TYPE_NONE; - if (amdgpu_dpm == 0) - return 0; - ret = ci_dpm_init_microcode(adev); if (ret) return ret; + if (amdgpu_dpm == 0) + return 0; + INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); 
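/* Editor's note, not part of the patch: ci_dpm_init_microcode() now runs
 * before the amdgpu_dpm == 0 early return, so the SMC firmware is available
 * even when DPM is disabled — the reworked ci_dpm_hw_init() below uploads it
 * and starts the SMC by hand in that case, and ci_dpm_hw_fini() gains the
 * matching ci_dpm_stop_smc() call. */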
mutex_lock(&adev->pm.mutex); ret = ci_dpm_init(adev); @@ -6328,8 +6318,15 @@ static int ci_dpm_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!amdgpu_dpm) + if (!amdgpu_dpm) { + ret = ci_upload_firmware(adev); + if (ret) { + DRM_ERROR("ci_upload_firmware failed\n"); + return ret; + } + ci_dpm_start_smc(adev); return 0; + } mutex_lock(&adev->pm.mutex); ci_dpm_setup_asic(adev); @@ -6351,6 +6348,8 @@ static int ci_dpm_hw_fini(void *handle) mutex_lock(&adev->pm.mutex); ci_dpm_disable(adev); mutex_unlock(&adev->pm.mutex); + } else { + ci_dpm_stop_smc(adev); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 7da688b0d27d..c4d4b35e54ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1176,6 +1176,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { /* enable BM */ pci_set_master(adev->pdev); + adev->has_hw_reset = true; r = 0; break; } @@ -1722,8 +1723,8 @@ static int cik_common_early_init(void *handle) AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_DMG |*/ AMD_PG_SUPPORT_UVD | - /*AMD_PG_SUPPORT_VCE | - AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_VCE | + /* AMD_PG_SUPPORT_CP | AMD_PG_SUPPORT_GDS | AMD_PG_SUPPORT_RLC_SMU_HS | AMD_PG_SUPPORT_ACP | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 1cf1d9d1aec1..5b24e89552ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -3737,9 +3737,15 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev, default: encoder->possible_crtcs = 0x3; break; + case 3: + encoder->possible_crtcs = 0x7; + break; case 4: encoder->possible_crtcs = 0xf; break; + case 5: + encoder->possible_crtcs = 0x1f; + break; case 6: encoder->possible_crtcs = 0x3f; break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 762f8e82ceb7..e9a176891e13 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = static void dce_virtual_encoder_destroy(struct drm_encoder *encoder) { - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - - kfree(amdgpu_encoder->enc_priv); drm_encoder_cleanup(encoder); - kfree(amdgpu_encoder); + kfree(encoder); } static const struct drm_encoder_funcs dce_virtual_encoder_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index c998f6aaaf36..2086e7e68de4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1325,21 +1325,19 @@ static u32 gfx_v6_0_create_bitmask(u32 bit_width) return (u32)(((u64)1 << bit_width) - 1); } -static u32 gfx_v6_0_get_rb_disabled(struct amdgpu_device *adev, - u32 max_rb_num_per_se, - u32 sh_per_se) +static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; - data = RREG32(mmCC_RB_BACKEND_DISABLE); - data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; - data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); + data = RREG32(mmCC_RB_BACKEND_DISABLE) | + RREG32(mmGC_USER_RB_BACKEND_DISABLE); - data >>= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; + data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE); - mask = gfx_v6_0_create_bitmask(max_rb_num_per_se / sh_per_se); + mask = 
gfx_v6_0_create_bitmask(adev->gfx.config.max_backends_per_se/ + adev->gfx.config.max_sh_per_se); - return data & mask; + return ~data & mask; } static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf) @@ -1468,68 +1466,55 @@ static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev, gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); } -static void gfx_v6_0_setup_rb(struct amdgpu_device *adev, - u32 se_num, u32 sh_per_se, - u32 max_rb_num_per_se) +static void gfx_v6_0_setup_rb(struct amdgpu_device *adev) { int i, j; - u32 data, mask; - u32 disabled_rbs = 0; - u32 enabled_rbs = 0; + u32 data; + u32 raster_config = 0; + u32 active_rbs = 0; + u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se / + adev->gfx.config.max_sh_per_se; unsigned num_rb_pipes; mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - for (j = 0; j < sh_per_se; j++) { + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); - data = gfx_v6_0_get_rb_disabled(adev, max_rb_num_per_se, sh_per_se); - disabled_rbs |= data << ((i * sh_per_se + j) * 2); + data = gfx_v6_0_get_rb_active_bitmap(adev); + active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * + rb_bitmap_width_per_sh); } } gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - - mask = 1; - for (i = 0; i < max_rb_num_per_se * se_num; i++) { - if (!(disabled_rbs & mask)) - enabled_rbs |= mask; - mask <<= 1; - } - adev->gfx.config.backend_enable_mask = enabled_rbs; - adev->gfx.config.num_rbs = hweight32(enabled_rbs); + adev->gfx.config.backend_enable_mask = active_rbs; + adev->gfx.config.num_rbs = hweight32(active_rbs); num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se * adev->gfx.config.max_shader_engines, 16); - mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - gfx_v6_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff); - data = 0; - for (j = 0; j < sh_per_se; j++) { - switch (enabled_rbs & 3) { - case 1: - data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2); - break; - case 2: - data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2); - break; - case 3: - default: - data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2); - break; - } - enabled_rbs >>= 2; - } - gfx_v6_0_raster_config(adev, &data); + gfx_v6_0_raster_config(adev, &raster_config); - if (!adev->gfx.config.backend_enable_mask || - adev->gfx.config.num_rbs >= num_rb_pipes) - WREG32(mmPA_SC_RASTER_CONFIG, data); - else - gfx_v6_0_write_harvested_raster_configs(adev, data, - adev->gfx.config.backend_enable_mask, - num_rb_pipes); + if (!adev->gfx.config.backend_enable_mask || + adev->gfx.config.num_rbs >= num_rb_pipes) { + WREG32(mmPA_SC_RASTER_CONFIG, raster_config); + } else { + gfx_v6_0_write_harvested_raster_configs(adev, raster_config, + adev->gfx.config.backend_enable_mask, + num_rb_pipes); + } + + /* cache the values for userspace */ + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + adev->gfx.config.rb_config[i][j].rb_backend_disable = + RREG32(mmCC_RB_BACKEND_DISABLE); + adev->gfx.config.rb_config[i][j].user_rb_backend_disable = + RREG32(mmGC_USER_RB_BACKEND_DISABLE); + adev->gfx.config.rb_config[i][j].raster_config = + RREG32(mmPA_SC_RASTER_CONFIG); + } } gfx_v6_0_select_se_sh(adev, 
0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); @@ -1540,36 +1525,44 @@ static void gmc_v6_0_init_compute_vmid(struct amdgpu_device *adev) } */ -static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev, u32 cu_per_sh) +static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) { - u32 data, mask; + u32 data; - data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); + if (!bitmap) + return; - data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; - mask = gfx_v6_0_create_bitmask(cu_per_sh); + WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} - return ~data & mask; +static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev) +{ + u32 data, mask; + + data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) | + RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); + + mask = gfx_v6_0_create_bitmask(adev->gfx.config.max_cu_per_sh); + return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask; } -static void gfx_v6_0_setup_spi(struct amdgpu_device *adev, - u32 se_num, u32 sh_per_se, - u32 cu_per_sh) +static void gfx_v6_0_setup_spi(struct amdgpu_device *adev) { int i, j, k; u32 data, mask; u32 active_cu = 0; mutex_lock(&adev->grbm_idx_mutex); - for (i = 0; i < se_num; i++) { - for (j = 0; j < sh_per_se; j++) { + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); data = RREG32(mmSPI_STATIC_THREAD_MGMT_3); - active_cu = gfx_v6_0_get_cu_enabled(adev, cu_per_sh); + active_cu = gfx_v6_0_get_cu_enabled(adev); mask = 1; for (k = 0; k < 16; k++) { @@ -1717,6 +1710,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT; break; } + gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK; + if (adev->gfx.config.max_shader_engines == 2) + gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT; adev->gfx.config.gb_addr_config = gb_addr_config; WREG32(mmGB_ADDR_CONFIG, gb_addr_config); @@ -1735,13 +1731,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) #endif gfx_v6_0_tiling_mode_table_init(adev); - gfx_v6_0_setup_rb(adev, adev->gfx.config.max_shader_engines, - adev->gfx.config.max_sh_per_se, - adev->gfx.config.max_backends_per_se); + gfx_v6_0_setup_rb(adev); - gfx_v6_0_setup_spi(adev, adev->gfx.config.max_shader_engines, - adev->gfx.config.max_sh_per_se, - adev->gfx.config.max_cu_per_sh); + gfx_v6_0_setup_spi(adev); gfx_v6_0_get_cu_info(adev); @@ -2941,61 +2933,16 @@ static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev, } } -static u32 gfx_v6_0_get_cu_active_bitmap(struct amdgpu_device *adev, - u32 se, u32 sh) -{ - - u32 mask = 0, tmp, tmp1; - int i; - - mutex_lock(&adev->grbm_idx_mutex); - gfx_v6_0_select_se_sh(adev, se, sh, 0xffffffff); - tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); - tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); - gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - - tmp &= 0xffff0000; - - tmp |= tmp1; - tmp >>= 16; - - for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { - mask <<= 1; - mask |= 1; - } - - return (~tmp) & mask; -} - static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev) { - u32 i, j, k, active_cu_number = 0; - - u32 mask, counter, 
cu_bitmap; - u32 tmp = 0; - - for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { - for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - mask = 1; - cu_bitmap = 0; - counter = 0; - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { - if (gfx_v6_0_get_cu_active_bitmap(adev, i, j) & mask) { - if (counter < 2) - cu_bitmap |= mask; - counter++; - } - mask <<= 1; - } + u32 tmp; - active_cu_number += counter; - tmp |= (cu_bitmap << (i * 16 + j * 8)); - } - } + WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); - WREG32(mmRLC_PG_AO_CU_MASK, tmp); - WREG32_FIELD(RLC_MAX_PG_CU, MAX_POWERED_UP_CU, active_cu_number); + tmp = RREG32(mmRLC_MAX_PG_CU); + tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK; + tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); + WREG32(mmRLC_MAX_PG_CU, tmp); } static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, @@ -3770,18 +3717,26 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - bitmap = gfx_v6_0_get_cu_active_bitmap(adev, i, j); + gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v6_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); + bitmap = gfx_v6_0_get_cu_enabled(adev); cu_info->bitmap[i][j] = bitmap; - for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { + for (k = 0; k < 16; k++) { if (bitmap & mask) { if (counter < 2) ao_bitmap |= mask; @@ -3794,6 +3749,9 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev) } } + gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index e3589b55a1e1..1f9354541f29 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1983,6 +1983,14 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK | (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT)); WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK); + + tmp = RREG32(mmSPI_ARB_PRIORITY); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2); + WREG32(mmSPI_ARB_PRIORITY, tmp); + mutex_unlock(&adev->grbm_idx_mutex); udelay(50); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 35f9cd83b821..67afc901905c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -3898,6 +3898,14 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); + + tmp = RREG32(mmSPI_ARB_PRIORITY); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 
2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2); + tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2); + WREG32(mmSPI_ARB_PRIORITY, tmp); + mutex_unlock(&adev->grbm_idx_mutex); } @@ -7260,7 +7268,7 @@ static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t c static union { struct amdgpu_ce_ib_state regular; struct amdgpu_ce_ib_state_chained_ib chained; - } ce_payload = {0}; + } ce_payload = {}; if (ring->adev->virt.chained_ib_support) { ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload); @@ -7287,7 +7295,7 @@ static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t c static union { struct amdgpu_de_ib_state regular; struct amdgpu_de_ib_state_chained_ib chained; - } de_payload = {0}; + } de_payload = {}; gds_addr = csa_addr + 4096; if (ring->adev->virt.chained_ib_support) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e2b0b1646f99..0635829b18cf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) } WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); + if (adev->mode_info.num_crtc) + amdgpu_display_set_vga_render_state(adev, false); + gmc_v6_0_mc_stop(adev, &save); if (gmc_v6_0_wait_for_idle((void *)adev)) { @@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev) dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v6_0_mc_resume(adev, &save); - amdgpu_display_set_vga_render_state(adev, false); } static int gmc_v6_0_mc_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 8785ca570729..f5a343cb0010 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -1550,11 +1550,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { kv_dpm_powergate_vce(adev, false); - /* turn the clocks on when encoding */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - if (ret) - return ret; if (pi->caps_stable_p_state) pi->vce_boot_level = table->count - 1; else @@ -1573,15 +1568,9 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev, amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_VCEDPM_SetEnabledMask, (1 << pi->vce_boot_level)); - kv_enable_vce_dpm(adev, true); } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { kv_enable_vce_dpm(adev, false); - /* turn the clocks off when not encoding */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); - if (ret) - return ret; kv_dpm_powergate_vce(adev, true); } @@ -1688,70 +1677,44 @@ static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate) struct kv_power_info *pi = kv_get_pi(adev); int ret; - if (pi->uvd_power_gated == gate) - return; - pi->uvd_power_gated = gate; if (gate) { - if (pi->caps_uvd_pg) { - /* disable clockgating so we can properly shut down the block */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); - /* shutdown the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - /* XXX: check for errors */ - } + /* stop the UVD block */ + ret = amdgpu_set_powergating_state(adev, 
AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); kv_update_uvd_dpm(adev, gate); if (pi->caps_uvd_pg) /* power off the UVD block */ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); } else { - if (pi->caps_uvd_pg) { + if (pi->caps_uvd_pg) /* power on the UVD block */ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); /* re-init the UVD block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); - /* enable clockgating. hw will dynamically gate/ungate clocks on the fly */ - ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); - /* XXX: check for errors */ - } kv_update_uvd_dpm(adev, gate); + + ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); } } static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) { struct kv_power_info *pi = kv_get_pi(adev); - int ret; if (pi->vce_power_gated == gate) return; pi->vce_power_gated = gate; - if (gate) { - if (pi->caps_vce_pg) { - /* shutdown the VCE block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - /* XXX: check for errors */ - /* power off the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); - } - } else { - if (pi->caps_vce_pg) { - /* power on the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); - /* re-init the VCE block */ - ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - /* XXX: check for errors */ - } - } + if (!pi->caps_vce_pg) + return; + + if (gate) + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); + else + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); } static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) @@ -3009,8 +2972,7 @@ static int kv_dpm_late_init(void *handle) kv_dpm_powergate_acp(adev, true); kv_dpm_powergate_samu(adev, true); - kv_dpm_powergate_vce(adev, true); - kv_dpm_powergate_uvd(adev, true); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index da46992f7b18..b71e3faa40db 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1010,24 +1010,81 @@ static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { {PA_SC_RASTER_CONFIG, false, true}, }; -static uint32_t si_read_indexed_register(struct amdgpu_device *adev, - u32 se_num, u32 sh_num, - u32 reg_offset) +static uint32_t si_get_register_value(struct amdgpu_device *adev, + bool indexed, u32 se_num, + u32 sh_num, u32 reg_offset) { - uint32_t val; + if (indexed) { + uint32_t val; + unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num; + unsigned sh_idx = (sh_num == 0xffffffff) ? 
0 : sh_num; + + switch (reg_offset) { + case mmCC_RB_BACKEND_DISABLE: + return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable; + case mmGC_USER_RB_BACKEND_DISABLE: + return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable; + case mmPA_SC_RASTER_CONFIG: + return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config; + } - mutex_lock(&adev->grbm_idx_mutex); - if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); + mutex_lock(&adev->grbm_idx_mutex); + if (se_num != 0xffffffff || sh_num != 0xffffffff) + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); - val = RREG32(reg_offset); + val = RREG32(reg_offset); - if (se_num != 0xffffffff || sh_num != 0xffffffff) - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - return val; + if (se_num != 0xffffffff || sh_num != 0xffffffff) + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); + return val; + } else { + unsigned idx; + + switch (reg_offset) { + case mmGB_ADDR_CONFIG: + return adev->gfx.config.gb_addr_config; + case mmMC_ARB_RAMCFG: + return adev->gfx.config.mc_arb_ramcfg; + case mmGB_TILE_MODE0: + case mmGB_TILE_MODE1: + case mmGB_TILE_MODE2: + case mmGB_TILE_MODE3: + case mmGB_TILE_MODE4: + case mmGB_TILE_MODE5: + case mmGB_TILE_MODE6: + case mmGB_TILE_MODE7: + case mmGB_TILE_MODE8: + case mmGB_TILE_MODE9: + case mmGB_TILE_MODE10: + case mmGB_TILE_MODE11: + case mmGB_TILE_MODE12: + case mmGB_TILE_MODE13: + case mmGB_TILE_MODE14: + case mmGB_TILE_MODE15: + case mmGB_TILE_MODE16: + case mmGB_TILE_MODE17: + case mmGB_TILE_MODE18: + case mmGB_TILE_MODE19: + case mmGB_TILE_MODE20: + case mmGB_TILE_MODE21: + case mmGB_TILE_MODE22: + case mmGB_TILE_MODE23: + case mmGB_TILE_MODE24: + case mmGB_TILE_MODE25: + case mmGB_TILE_MODE26: + case mmGB_TILE_MODE27: + case mmGB_TILE_MODE28: + case mmGB_TILE_MODE29: + case mmGB_TILE_MODE30: + case mmGB_TILE_MODE31: + idx = (reg_offset - mmGB_TILE_MODE0); + return adev->gfx.config.tile_mode_array[idx]; + default: + return RREG32(reg_offset); + } + } } - static int si_read_register(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 reg_offset, u32 *value) { @@ -1039,10 +1096,9 @@ static int si_read_register(struct amdgpu_device *adev, u32 se_num, continue; if (!si_allowed_read_registers[i].untouched) - *value = si_allowed_read_registers[i].grbm_indexed ? 
- si_read_indexed_register(adev, se_num, - sh_num, reg_offset) : - RREG32(reg_offset); + *value = si_get_register_value(adev, + si_allowed_read_registers[i].grbm_indexed, + se_num, sh_num, reg_offset); return 0; } return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/si_enums.h b/drivers/gpu/drm/amd/amdgpu/si_enums.h index fde2086246fa..dc9e0e6b4558 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_enums.h +++ b/drivers/gpu/drm/amd/amdgpu/si_enums.h @@ -143,8 +143,8 @@ #define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D #define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 -#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 -#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001 +#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x02010002 +#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02011003 #define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \ (((op) & 0xFF) << 8) | \ diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 7fb9137dd89b..b34cefc7ebd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -159,9 +159,6 @@ static int uvd_v4_2_hw_init(void *handle) uvd_v4_2_enable_mgcg(adev, true); amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); - r = uvd_v4_2_start(adev); - if (r) - goto done; ring->ready = true; r = amdgpu_ring_test_ring(ring); @@ -198,7 +195,6 @@ static int uvd_v4_2_hw_init(void *handle) amdgpu_ring_commit(ring); done: - if (!r) DRM_INFO("UVD initialized successfully.\n"); @@ -217,7 +213,9 @@ static int uvd_v4_2_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; - uvd_v4_2_stop(adev); + if (RREG32(mmUVD_STATUS) != 0) + uvd_v4_2_stop(adev); + ring->ready = false; return 0; @@ -267,37 +265,26 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->uvd.ring; uint32_t rb_bufsz; int i, j, r; + u32 tmp; /* disable byte swapping */ u32 lmi_swap_cntl = 0; u32 mp_swap_cntl = 0; - WREG32(mmUVD_CGC_GATE, 0); - uvd_v4_2_set_dcm(adev, true); - - uvd_v4_2_mc_resume(adev); + /* set uvd busy */ + WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2)); - /* disable interupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); - - /* Stall UMC and register bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); - mdelay(1); - - /* put LMI, VCPU, RBC etc... 
into reset */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | - UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); - mdelay(5); + uvd_v4_2_set_dcm(adev, true); + WREG32(mmUVD_CGC_GATE, 0); /* take UVD block out of reset */ WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); mdelay(5); - /* initialize UVD memory controller */ - WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | - (1 << 21) | (1 << 9) | (1 << 20)); + /* enable VCPU clock */ + WREG32(mmUVD_VCPU_CNTL, 1 << 9); + + /* disable interupt */ + WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); #ifdef __BIG_ENDIAN /* swap (8 in 32) RB and IB */ @@ -306,6 +293,11 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) #endif WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); + /* initialize UVD memory controller */ + WREG32(mmUVD_LMI_CTRL, 0x203108); + + tmp = RREG32(mmUVD_MPC_CNTL); + WREG32(mmUVD_MPC_CNTL, tmp | 0x10); WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); WREG32(mmUVD_MPC_SET_MUXA1, 0x0); @@ -314,18 +306,20 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) WREG32(mmUVD_MPC_SET_ALU, 0); WREG32(mmUVD_MPC_SET_MUX, 0x88); - /* take all subblocks out of reset, except VCPU */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + uvd_v4_2_mc_resume(adev); - /* enable VCPU clock */ - WREG32(mmUVD_VCPU_CNTL, 1 << 9); + tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL); + WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10)); /* enable UMC */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); - /* boot up the VCPU */ - WREG32(mmUVD_SOFT_RESET, 0); + WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK); + + WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + + WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); for (i = 0; i < 10; ++i) { @@ -357,6 +351,8 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) /* enable interupt */ WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1)); + WREG32_P(mmUVD_STATUS, 0, ~(1<<2)); + /* force RBC into idle state */ WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); @@ -393,22 +389,57 @@ static int uvd_v4_2_start(struct amdgpu_device *adev) */ static void uvd_v4_2_stop(struct amdgpu_device *adev) { - /* force RBC into idle state */ + uint32_t i, j; + uint32_t status; + WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmUVD_STATUS); + if (status & 2) + break; + mdelay(1); + } + if (status & 2) + break; + } + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmUVD_LMI_STATUS); + if (status & 0xf) + break; + mdelay(1); + } + if (status & 0xf) + break; + } + /* Stall UMC and register bus before resetting VCPU */ WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); - mdelay(1); - /* put VCPU into reset */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmUVD_LMI_STATUS); + if (status & 0x240) + break; + mdelay(1); + } + if (status & 0x240) + break; + } - /* disable VCPU clock */ - WREG32(mmUVD_VCPU_CNTL, 0x0); + WREG32_P(0x3D49, 0, ~(1 << 2)); - /* Unstall UMC and register bus */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9)); 
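/* Editor's note, not part of the patch: the reworked uvd_v4_2_stop() is
 * staged — it polls UVD_STATUS and then UVD_LMI_STATUS until the engine and
 * memory interface drain (each poll bounded at 10 x 100 iterations of
 * mdelay(1), roughly a second per stage at worst), stalls the UMC, drops the
 * VCPU clock, and only then asserts the soft resets below and clears
 * mmUVD_STATUS. */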
+ + /* put LMI, VCPU, RBC etc... into reset */ + WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + + WREG32(mmUVD_STATUS, 0); uvd_v4_2_set_dcm(adev, false); } @@ -694,8 +725,26 @@ static int uvd_v4_2_set_powergating_state(void *handle, if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); + if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { + if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) { + WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | + UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK | + UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); + mdelay(20); + } + } return 0; } else { + if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { + WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | + UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK | + UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); + mdelay(30); + } + } return uvd_v4_2_start(adev); } } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 9b49824233ae..ad8c02e423d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle) uint32_t tmp; int r; - r = uvd_v5_0_start(adev); - if (r) - goto done; + amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); + uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); + uvd_v5_0_enable_mgcg(adev, true); ring->ready = true; r = amdgpu_ring_test_ring(ring); @@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle) amdgpu_ring_write(ring, 3); amdgpu_ring_commit(ring); + done: if (!r) DRM_INFO("UVD initialized successfully.\n"); return r; + } /** @@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; - uvd_v5_0_stop(adev); + if (RREG32(mmUVD_STATUS) != 0) + uvd_v5_0_stop(adev); + ring->ready = false; return 0; @@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev) uvd_v5_0_mc_resume(adev); - amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); - uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); - uvd_v5_0_enable_mgcg(adev, true); - /* disable interupt */ WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); @@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev) /* Unstall UMC and register bus */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + WREG32(mmUVD_STATUS, 0); } /** @@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - if (enable) { /* wait for STATUS to clear */ if (uvd_v5_0_wait_for_idle(handle)) @@ -824,17 +823,12 @@ static int uvd_v5_0_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - if (state == AMD_PG_STATE_GATE) { uvd_v5_0_stop(adev); - adev->uvd.is_powergated = true; } else { ret = uvd_v5_0_start(adev); if (ret) goto out; - adev->uvd.is_powergated = false; } out: @@ -848,7 +842,8 @@ static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags) mutex_lock(&adev->pm.mutex); - if (adev->uvd.is_powergated) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); goto out; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index de7e03544d00..18a6de4e1512 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle) uint32_t tmp; int r; - r = uvd_v6_0_start(adev); - if (r) - goto done; + amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); + uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); + uvd_v6_0_enable_mgcg(adev, true); ring->ready = true; r = amdgpu_ring_test_ring(ring); @@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->uvd.ring; - uvd_v6_0_stop(adev); + if (RREG32(mmUVD_STATUS) != 0) + uvd_v6_0_stop(adev); + ring->ready = false; return 0; @@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) lmi_swap_cntl = 0; mp_swap_cntl = 0; - amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); - uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); - uvd_v6_0_enable_mgcg(adev, true); uvd_v6_0_mc_resume(adev); /* disable interupt */ @@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev) /* Unstall UMC and register bus */ WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + + WREG32(mmUVD_STATUS, 0); } /** @@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; - if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) - return 0; - if (enable) { /* wait for STATUS to clear */ if (uvd_v6_0_wait_for_idle(handle)) @@ -1049,19 +1047,14 @@ static int uvd_v6_0_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; - if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) - return 0; - WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); - adev->uvd.is_powergated = true; } else { ret = uvd_v6_0_start(adev); if (ret) goto out; - adev->uvd.is_powergated = false; } out: @@ -1075,7 +1068,8 @@ static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags) mutex_lock(&adev->pm.mutex); - if (adev->uvd.is_powergated) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { DRM_INFO("Cannot get clockgating state when UVD is powergated.\n"); goto out; } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 38ed903dd6f8..9ea99348e493 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -42,10 +42,9 @@ #define VCE_V2_0_DATA_SIZE (23552 * AMDGPU_MAX_VCE_HANDLES) #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 -static void vce_v2_0_mc_resume(struct amdgpu_device *adev); static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev); -static int vce_v2_0_wait_for_idle(void *handle); + /** * vce_v2_0_ring_get_rptr - get read pointer * @@ -140,6 +139,86 @@ static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev) return -ETIMEDOUT; } +static void vce_v2_0_disable_cg(struct amdgpu_device *adev) +{ + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7); +} + +static void vce_v2_0_init_cg(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32(mmVCE_CLOCK_GATING_A); + tmp &= ~0xfff; + tmp |= ((0 << 0) | (4 << 4)); + tmp |= 0x40000; + WREG32(mmVCE_CLOCK_GATING_A, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0xfff; + tmp |= ((0 << 0) | (4 << 4)); + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0x10; + tmp &= ~0x100000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); +} + +static void vce_v2_0_mc_resume(struct amdgpu_device *adev) +{ + uint64_t addr = adev->vce.gpu_addr; + uint32_t size; + + WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); + WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); + WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); + WREG32(mmVCE_CLOCK_GATING_B, 0xf7); + + WREG32(mmVCE_LMI_CTRL, 0x00398000); + WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); + WREG32(mmVCE_LMI_SWAP_CNTL, 0); + WREG32(mmVCE_LMI_SWAP_CNTL1, 0); + WREG32(mmVCE_LMI_VM_CTRL, 0); + + addr += AMDGPU_VCE_FIRMWARE_OFFSET; + size = VCE_V2_0_FW_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE0, size); + + addr += size; + size = VCE_V2_0_STACK_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE1, size); + + addr += size; + size = VCE_V2_0_DATA_SIZE; + WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff); + WREG32(mmVCE_VCPU_CACHE_SIZE2, size); + + WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); + WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); +} + +static bool vce_v2_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); +} + +static int vce_v2_0_wait_for_idle(void 
*handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + unsigned i; + + for (i = 0; i < adev->usec_timeout; i++) { + if (vce_v2_0_is_idle(handle)) + return 0; + } + return -ETIMEDOUT; +} + /** * vce_v2_0_start - start VCE block * @@ -152,11 +231,14 @@ static int vce_v2_0_start(struct amdgpu_device *adev) struct amdgpu_ring *ring; int r; - vce_v2_0_mc_resume(adev); - /* set BUSY flag */ WREG32_P(mmVCE_STATUS, 1, ~1); + vce_v2_0_init_cg(adev); + vce_v2_0_disable_cg(adev); + + vce_v2_0_mc_resume(adev); + ring = &adev->vce.ring[0]; WREG32(mmVCE_RB_RPTR, ring->wptr); WREG32(mmVCE_RB_WPTR, ring->wptr); @@ -189,6 +271,145 @@ static int vce_v2_0_start(struct amdgpu_device *adev) return 0; } +static int vce_v2_0_stop(struct amdgpu_device *adev) +{ + int i, j; + int status; + + if (vce_v2_0_lmi_clean(adev)) { + DRM_INFO("vce is not idle \n"); + return 0; + } +/* + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmVCE_FW_REG_STATUS); + if (!(status & 1)) + break; + mdelay(1); + } + break; + } +*/ + if (vce_v2_0_wait_for_idle(adev)) { + DRM_INFO("VCE is busy, Can't set clock gateing"); + return 0; + } + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8)); + + for (i = 0; i < 10; ++i) { + for (j = 0; j < 100; ++j) { + status = RREG32(mmVCE_LMI_STATUS); + if (status & 0x240) + break; + mdelay(1); + } + break; + } + + WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001); + + /* put LMI, VCPU, RBC etc... into reset */ + WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1); + + WREG32(mmVCE_STATUS, 0); + + return 0; +} + +static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) +{ + u32 tmp; + + if (gated) { + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0xe70000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp |= 0xff000000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3fc; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); + } else { + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp |= 0xe7; + tmp &= ~0xe70000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + + tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp |= 0x1fe000; + tmp &= ~0xff000000; + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp |= 0x3fc; + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + } +} + +static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) +{ + u32 orig, tmp; + +/* LMI_MC/LMI_UMC always set in dynamic, + * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} + */ + tmp = RREG32(mmVCE_CLOCK_GATING_B); + tmp &= ~0x00060006; + +/* Exception for ECPU, IH, SEM, SYS blocks needs to be turned on/off by SW */ + if (gated) { + tmp |= 0xe10000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + } else { + tmp |= 0xe1; + tmp &= ~0xe10000; + WREG32(mmVCE_CLOCK_GATING_B, tmp); + } + + orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING); + tmp &= ~0x1fe000; + tmp &= ~0xff000000; + if (tmp != orig) + WREG32(mmVCE_UENC_CLOCK_GATING, tmp); + + orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); + tmp &= ~0x3fc; + if (tmp != orig) + WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); + + /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */ + WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00); + + if(gated) + WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); +} + +static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable, + bool sw_cg) +{ + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { + if (sw_cg) + vce_v2_0_set_sw_cg(adev, true); + 
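/*
 * Aside (not part of the patch): the stop and clock-gating paths above lean
 * on WREG32_P(), amdgpu's masked read-modify-write idiom, where the third
 * argument selects the bits to preserve and the second supplies the new
 * bits. Open-coded, the UMC stall from vce_v2_0_stop() is equivalent to:
 *
 *   u32 tmp = RREG32(mmVCE_LMI_CTRL2);
 *
 *   tmp &= ~(1 << 8);   // keep everything except bit 8
 *   tmp |= 1 << 8;      // set bit 8: stall the UMC and register bus
 *   WREG32(mmVCE_LMI_CTRL2, tmp);
 *
 * which is what WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8)) expands to.
 */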
else + vce_v2_0_set_dyn_cg(adev, true); + } else { + vce_v2_0_disable_cg(adev); + + if (sw_cg) + vce_v2_0_set_sw_cg(adev, false); + else + vce_v2_0_set_dyn_cg(adev, false); + } +} + static int vce_v2_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -254,11 +475,8 @@ static int vce_v2_0_hw_init(void *handle) int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vce_v2_0_start(adev); - /* this error mean vcpu not in running state, so just skip ring test, not stop driver initialize */ - if (r) - return 0; - + amdgpu_asic_set_vce_clocks(adev, 10000, 10000); + vce_v2_0_enable_mgcg(adev, true, false); for (i = 0; i < adev->vce.num_rings; i++) adev->vce.ring[i].ready = false; @@ -312,190 +530,6 @@ static int vce_v2_0_resume(void *handle) return r; } -static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated) -{ - u32 tmp; - - if (gated) { - tmp = RREG32(mmVCE_CLOCK_GATING_B); - tmp |= 0xe70000; - WREG32(mmVCE_CLOCK_GATING_B, tmp); - - tmp = RREG32(mmVCE_UENC_CLOCK_GATING); - tmp |= 0xff000000; - WREG32(mmVCE_UENC_CLOCK_GATING, tmp); - - tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - tmp &= ~0x3fc; - WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); - - WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); - } else { - tmp = RREG32(mmVCE_CLOCK_GATING_B); - tmp |= 0xe7; - tmp &= ~0xe70000; - WREG32(mmVCE_CLOCK_GATING_B, tmp); - - tmp = RREG32(mmVCE_UENC_CLOCK_GATING); - tmp |= 0x1fe000; - tmp &= ~0xff000000; - WREG32(mmVCE_UENC_CLOCK_GATING, tmp); - - tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING); - tmp |= 0x3fc; - WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp); - } -} - -static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated) -{ - if (vce_v2_0_wait_for_idle(adev)) { - DRM_INFO("VCE is busy, Can't set clock gateing"); - return; - } - - WREG32_P(mmVCE_LMI_CTRL2, 0x100, ~0x100); - - if (vce_v2_0_lmi_clean(adev)) { - DRM_INFO("LMI is busy, Can't set clock gateing"); - return; - } - - WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK); - WREG32_P(mmVCE_SOFT_RESET, - VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, - ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - WREG32(mmVCE_STATUS, 0); - - if (gated) - WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0); - /* LMI_MC/LMI_UMC always set in dynamic, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0} */ - if (gated) { - /* Force CLOCK OFF , set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {*, 1} */ - WREG32(mmVCE_CLOCK_GATING_B, 0xe90010); - } else { - /* Force CLOCK ON, set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {1, 0} */ - WREG32(mmVCE_CLOCK_GATING_B, 0x800f1); - } - - /* Set VCE_UENC_CLOCK_GATING always in dynamic mode {*_FORCE_ON, *_FORCE_OFF} = {0, 0}*/; - WREG32(mmVCE_UENC_CLOCK_GATING, 0x40); - - /* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */ - WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00); - - WREG32_P(mmVCE_LMI_CTRL2, 0, ~0x100); - if(!gated) { - WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); - mdelay(100); - WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); - - vce_v2_0_firmware_loaded(adev); - WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK); - } -} - -static void vce_v2_0_disable_cg(struct amdgpu_device *adev) -{ - WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7); -} - -static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable) -{ - bool sw_cg = false; - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { - if (sw_cg) - vce_v2_0_set_sw_cg(adev, true); - else - vce_v2_0_set_dyn_cg(adev, true); - } else { - vce_v2_0_disable_cg(adev); - - if (sw_cg) - 
vce_v2_0_set_sw_cg(adev, false); - else - vce_v2_0_set_dyn_cg(adev, false); - } -} - -static void vce_v2_0_init_cg(struct amdgpu_device *adev) -{ - u32 tmp; - - tmp = RREG32(mmVCE_CLOCK_GATING_A); - tmp &= ~0xfff; - tmp |= ((0 << 0) | (4 << 4)); - tmp |= 0x40000; - WREG32(mmVCE_CLOCK_GATING_A, tmp); - - tmp = RREG32(mmVCE_UENC_CLOCK_GATING); - tmp &= ~0xfff; - tmp |= ((0 << 0) | (4 << 4)); - WREG32(mmVCE_UENC_CLOCK_GATING, tmp); - - tmp = RREG32(mmVCE_CLOCK_GATING_B); - tmp |= 0x10; - tmp &= ~0x100000; - WREG32(mmVCE_CLOCK_GATING_B, tmp); -} - -static void vce_v2_0_mc_resume(struct amdgpu_device *adev) -{ - uint64_t addr = adev->vce.gpu_addr; - uint32_t size; - - WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); - WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); - WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); - WREG32(mmVCE_CLOCK_GATING_B, 0xf7); - - WREG32(mmVCE_LMI_CTRL, 0x00398000); - WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); - WREG32(mmVCE_LMI_SWAP_CNTL, 0); - WREG32(mmVCE_LMI_SWAP_CNTL1, 0); - WREG32(mmVCE_LMI_VM_CTRL, 0); - - addr += AMDGPU_VCE_FIRMWARE_OFFSET; - size = VCE_V2_0_FW_SIZE; - WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff); - WREG32(mmVCE_VCPU_CACHE_SIZE0, size); - - addr += size; - size = VCE_V2_0_STACK_SIZE; - WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff); - WREG32(mmVCE_VCPU_CACHE_SIZE1, size); - - addr += size; - size = VCE_V2_0_DATA_SIZE; - WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff); - WREG32(mmVCE_VCPU_CACHE_SIZE2, size); - - WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); - WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1); - - vce_v2_0_init_cg(adev); -} - -static bool vce_v2_0_is_idle(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); -} - -static int vce_v2_0_wait_for_idle(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - unsigned i; - - for (i = 0; i < adev->usec_timeout; i++) { - if (vce_v2_0_is_idle(handle)) - return 0; - } - return -ETIMEDOUT; -} - static int vce_v2_0_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -539,33 +573,20 @@ static int vce_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static void vce_v2_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); - - if (enable) - tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - else - tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; - - WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); -} - - static int vce_v2_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { bool gate = false; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; - + bool sw_cg = false; - vce_v2_0_set_bypass_mode(adev, enable); + struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (state == AMD_CG_STATE_GATE) + if (state == AMD_CG_STATE_GATE) { gate = true; + sw_cg = true; + } - vce_v2_0_enable_mgcg(adev, gate); + vce_v2_0_enable_mgcg(adev, gate, sw_cg); return 0; } @@ -582,12 +603,8 @@ static int vce_v2_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) - return 0; - if (state == AMD_PG_STATE_GATE) - /* XXX do we need a vce_v2_0_stop()? 
*/ - return 0; + return vce_v2_0_stop(adev); else return vce_v2_0_start(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 8db26559fd1b..93ec8815bb13 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -230,10 +230,6 @@ static int vce_v3_0_start(struct amdgpu_device *adev) struct amdgpu_ring *ring; int idx, r; - vce_v3_0_override_vce_clock_gating(adev, true); - if (!(adev->flags & AMD_IS_APU)) - amdgpu_asic_set_vce_clocks(adev, 10000, 10000); - ring = &adev->vce.ring[0]; WREG32(mmVCE_RB_RPTR, ring->wptr); WREG32(mmVCE_RB_WPTR, ring->wptr); @@ -436,9 +432,9 @@ static int vce_v3_0_hw_init(void *handle) int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = vce_v3_0_start(adev); - if (r) - return r; + vce_v3_0_override_vce_clock_gating(adev, true); + if (!(adev->flags & AMD_IS_APU)) + amdgpu_asic_set_vce_clocks(adev, 10000, 10000); for (i = 0; i < adev->vce.num_rings; i++) adev->vce.ring[i].ready = false; @@ -514,6 +510,8 @@ static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx) WREG32(mmVCE_LMI_SWAP_CNTL, 0); WREG32(mmVCE_LMI_SWAP_CNTL1, 0); WREG32(mmVCE_LMI_VM_CTRL, 0); + WREG32_OR(mmVCE_VCPU_CNTL, 0x00100000); + if (adev->asic_type >= CHIP_STONEY) { WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR0, (adev->vce.gpu_addr >> 8)); WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR1, (adev->vce.gpu_addr >> 8)); @@ -766,17 +764,14 @@ static int vce_v3_0_set_powergating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; - if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) - return 0; - if (state == AMD_PG_STATE_GATE) { - adev->vce.is_powergated = true; - /* XXX do we need a vce_v3_0_stop()? */ + ret = vce_v3_0_stop(adev); + if (ret) + goto out; } else { ret = vce_v3_0_start(adev); if (ret) goto out; - adev->vce.is_powergated = false; } out: @@ -790,7 +785,8 @@ static void vce_v3_0_get_clockgating_state(void *handle, u32 *flags) mutex_lock(&adev->pm.mutex); - if (adev->vce.is_powergated) { + if (RREG32_SMC(ixCURRENT_PG_STATUS) & + CURRENT_PG_STATUS__VCE_PG_STATUS_MASK) { DRM_INFO("Cannot get clockgating state when VCE is powergated.\n"); goto out; } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 4922fff08c3c..50bdb24ef8d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -721,6 +721,7 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { /* enable BM */ pci_set_master(adev->pdev); + adev->has_hw_reset = true; return 0; } udelay(1); diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h index f9fd2ea4625b..dbc2e723f659 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h @@ -1310,5 +1310,6 @@ #define ixROM_SW_DATA_62 0xc060012c #define ixROM_SW_DATA_63 0xc0600130 #define ixROM_SW_DATA_64 0xc0600134 +#define ixCURRENT_PG_STATUS 0xc020029c #endif /* SMU_7_0_1_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h index 25882a4dea5d..34c6ff52710e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h @@ -5452,5 +5452,7 @@ #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 
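/*
 * Aside (not part of the patch): the vi.c hunk above sets the new
 * adev->has_hw_reset flag once a PCI config reset is confirmed, that is,
 * once mmCONFIG_MEMSIZE reads back a sane value and bus mastering has been
 * re-enabled. A flag like this lets later init code tell "the GPU came up
 * clean" apart from "the GPU was just reset and needs a re-post". A
 * hypothetical consumer, ours for illustration only:
 *
 *   if (adev->has_hw_reset) {
 *           adev->has_hw_reset = false;   // one-shot: consume the event
 *           return true;                  // a vBIOS post is required
 *   }
 */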
0xffffffff #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 +#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 #endif /* SMU_7_0_1_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h index a9ef1562f43b..66597c64f525 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_d.h @@ -1121,5 +1121,6 @@ #define ixROM_SW_DATA_62 0xc060011c #define ixROM_SW_DATA_63 0xc0600120 #define ixROM_SW_DATA_64 0xc0600124 +#define ixCURRENT_PG_STATUS 0xc020029c #endif /* SMU_7_1_1_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h index 2c997f7b5d13..fb06f2e2f6e6 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_1_sh_mask.h @@ -4860,5 +4860,7 @@ #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 +#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 #endif /* SMU_7_1_1_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h index 22dd4c2b7290..4446d43d2a8f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h @@ -1271,5 +1271,6 @@ #define ixROM_SW_DATA_62 0xc060011c #define ixROM_SW_DATA_63 0xc0600120 #define ixROM_SW_DATA_64 0xc0600124 +#define ixCURRENT_PG_STATUS 0xc020029c #endif /* SMU_7_1_2_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h index 518fd02e9d35..627906674fe8 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h @@ -5830,5 +5830,7 @@ #define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 #define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xffffffff #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 +#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 #endif /* SMU_7_1_2_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h index eca2b851f25f..0333d880bc9e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h @@ -1244,5 +1244,5 @@ #define ixGC_CAC_ACC_CU14 0xc8 #define ixGC_CAC_ACC_CU15 0xc9 #define ixGC_CAC_OVRD_CU 0xe7 - +#define ixCURRENT_PG_STATUS 0xc020029c #endif /* SMU_7_1_3_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h index 1ede9e274714..654c1093d362 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h @@ -6076,5 +6076,8 @@ #define GC_CAC_OVRD_CU__OVRRD_SELECT__SHIFT 0x0 #define GC_CAC_OVRD_CU__OVRRD_VALUE_MASK 0xffff0000 #define GC_CAC_OVRD_CU__OVRRD_VALUE__SHIFT 0x10 +#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002 +#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004 + #endif /* SMU_7_1_3_SH_MASK_H */ diff --git 
a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 1d26ae768147..17b9d41f3e87 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -171,6 +171,7 @@ struct cgs_firmware_info { uint32_t ucode_start_address; void *kptr; + bool is_kicker; }; struct cgs_mode_info { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 3eccac735db3..b33935fcf428 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -161,28 +161,25 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - if (cz_hwmgr->uvd_power_gated == bgate) - return 0; - cz_hwmgr->uvd_power_gated = bgate; if (bgate) { - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); cz_dpm_update_uvd_dpm(hwmgr, true); cz_dpm_powerdown_uvd(hwmgr); } else { cz_dpm_powerup_uvd(hwmgr); - cgs_set_powergating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_UNGATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); cz_dpm_update_uvd_dpm(hwmgr, false); } @@ -193,47 +190,34 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_VCEPowerGating)) { - if (cz_hwmgr->vce_power_gated != bgate) { - if (bgate) { - cgs_set_clockgating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_GATE); - cgs_set_powergating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - cz_enable_disable_vce_dpm(hwmgr, false); - cz_dpm_powerdown_vce(hwmgr); - cz_hwmgr->vce_power_gated = true; - } else { - cz_dpm_powerup_vce(hwmgr); - cz_hwmgr->vce_power_gated = false; - cgs_set_powergating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); - cgs_set_clockgating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - cz_dpm_update_vce_dpm(hwmgr); - cz_enable_disable_vce_dpm(hwmgr, true); - return 0; - } - } + if (bgate) { + cgs_set_powergating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + cgs_set_clockgating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + cz_enable_disable_vce_dpm(hwmgr, false); + cz_dpm_powerdown_vce(hwmgr); + cz_hwmgr->vce_power_gated = true; } else { - cz_hwmgr->vce_power_gated = bgate; + cz_dpm_powerup_vce(hwmgr); + cz_hwmgr->vce_power_gated = false; + cgs_set_clockgating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + cgs_set_powergating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); - cz_enable_disable_vce_dpm(hwmgr, !bgate); + cz_enable_disable_vce_dpm(hwmgr, true); return 0; } - if (!cz_hwmgr->vce_power_gated) - cz_dpm_update_vce_dpm(hwmgr); - return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 4b0a94cc995e..953e0c9ad7cd 100644 --- 
a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -1396,3 +1396,25 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, return 0; } + +int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, + uint8_t *svd_gpio_id, uint8_t *svc_gpio_id, + uint16_t *load_line) +{ + ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = + (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); + + const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; + + PP_ASSERT_WITH_CODE((NULL != voltage_info), + "Could not find Voltage Table in BIOS.", return -EINVAL); + + voltage_object = atomctrl_lookup_voltage_type_v3 + (voltage_info, voltage_type, VOLTAGE_OBJ_SVID2); + + *svd_gpio_id = voltage_object->asSVID2Obj.ucSVDGpioId; + *svc_gpio_id = voltage_object->asSVID2Obj.ucSVCGpioId; + *load_line = voltage_object->asSVID2Obj.usLoadLine_PSI; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index fc898afce002..e9fe2e84006b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -311,5 +311,8 @@ extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_a extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param); +extern int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, + uint8_t *svd_gpio_id, uint8_t *svc_gpio_id, + uint16_t *load_line); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index a1fc4fcac1e0..8cf71f3c6d0e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -147,22 +147,22 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) data->uvd_power_gated = bgate; if (bgate) { - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_GATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); smu7_update_uvd_dpm(hwmgr, true); smu7_powerdown_uvd(hwmgr); } else { smu7_powerup_uvd(hwmgr); - cgs_set_powergating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_CG_STATE_UNGATE); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_UNGATE); smu7_update_uvd_dpm(hwmgr, false); } @@ -173,12 +173,12 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - if (data->vce_power_gated == bgate) - return 0; - data->vce_power_gated = bgate; if (bgate) { + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); @@ -186,10 +186,13 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) smu7_powerdown_vce(hwmgr); } else { smu7_powerup_vce(hwmgr); - smu7_update_vce_dpm(hwmgr, false); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); + cgs_set_powergating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + smu7_update_vce_dpm(hwmgr, false); } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 0a6c833720df..f75ee33ec5bb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1383,6 +1383,15 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr) data->force_pcie_gen = PP_PCIEGenInvalid; data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false; + if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) { + uint8_t tmp1, tmp2; + uint16_t tmp3 = 0; + atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2, + &tmp3); + tmp3 = (tmp3 >> 5) & 0x3; + data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3; + } + data->fast_watermark_threshold = 100; if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) @@ -2624,6 +2633,7 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask); smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask); smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask); + break; case AMD_DPM_FORCED_LEVEL_MANUAL: hwmgr->dpm_level = level; @@ -2633,9 +2643,9 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr, break; } - if (level & (AMD_DPM_FORCED_LEVEL_PROFILE_PEAK | AMD_DPM_FORCED_LEVEL_HIGH)) + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100); - else + else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr); return 0; @@ -4397,16 +4407,14 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL) return -EINVAL; dep_sclk_table = table_info->vdd_dep_on_sclk; - for (i = 0; i < dep_sclk_table->count; i++) { + for (i = 0; i < dep_sclk_table->count; i++) clocks->clock[i] = dep_sclk_table->entries[i].clk; - clocks->count++; - } + clocks->count = dep_sclk_table->count; } else if (hwmgr->pp_table_version == PP_TABLE_V0) { sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk; - for (i = 0; i < sclk_table->count; i++) { + for (i = 0; i < sclk_table->count; i++) clocks->clock[i] = sclk_table->entries[i].clk; - clocks->count++; - } + clocks->count = sclk_table->count; } return 0; @@ -4440,14 +4448,13 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks) clocks->clock[i] = dep_mclk_table->entries[i].clk; clocks->latency[i] = smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk); - clocks->count++; } + clocks->count = dep_mclk_table->count; } else if (hwmgr->pp_table_version == PP_TABLE_V0) { mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk; - for (i = 0; i < mclk_table->count; i++) { + for (i = 0; i < mclk_table->count; i++) clocks->clock[i] = mclk_table->entries[i].clk; - clocks->count++; - } + clocks->count = mclk_table->count; } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index 27e7f76ad8a6..f221e17b67e7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -268,7 +268,7 @@ struct smu7_hwmgr { uint32_t fast_watermark_threshold; /* ---- Phase Shedding ---- */ - bool vddc_phase_shed_control; + uint8_t vddc_phase_shed_control; /* ---- DI/DT ---- */ struct smu7_display_timing display_timing; diff --git 
a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 3341c0fbd069..1dc31aa72781 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -477,6 +477,151 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = { { 0xFFFFFFFF } }; +static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] = +{ +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + /* DIDT_SQ */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x004c, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00d0, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0069, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x0048, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x005f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x007a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x001f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x002d, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x0088, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, 
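/*
 * Aside (not part of the patch; the kicker DIDT table resumes below): each
 * row of DIDTConfig_Polaris11_Kicker follows the five-column header above,
 * an indirect DIDT register offset, a field mask, the field's shift, the
 * value to program, and an access type, and { 0xFFFFFFFF } terminates the
 * list. A sketch of how such a table is applied, modeled on the
 * smu7_program_pt_config_registers() call sites later in this file
 * (didt_read/didt_write are stand-in names):
 *
 *   const struct gpu_pt_config_reg *r;
 *
 *   for (r = table; r->offset != 0xFFFFFFFF; r++) {
 *           u32 v = didt_read(hwmgr, r->offset);
 *           v &= ~r->mask;
 *           v |= (r->value << r->shift) & r->mask;
 *           didt_write(hwmgr, r->offset, v);
 *   }
 */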
GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + /* DIDT_TD */ + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, 
DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, 
DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + /* DIDT_TCP */ + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, 
GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { 0xFFFFFFFF } /* End of list */ +}; static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) { @@ -630,7 +775,10 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) } else if (hwmgr->chip_id == CHIP_POLARIS11) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); - result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); + if (hwmgr->smumgr->is_kicker) + result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker); + else + result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); } else if (hwmgr->chip_id == CHIP_POLARIS12) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 9b6531bd6350..7c318a95e0c2 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -137,6 +137,7 @@ struct pp_smumgr { uint32_t usec_timeout; bool reload_fw; const struct pp_smumgr_func *smumgr_funcs; + bool is_kicker; }; extern int smum_early_init(struct pp_instance *handle); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 0e26900e459e..80e2329a1b9e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -494,6 +494,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct pp_smumgr *smumgr = hwmgr->smumgr; state->CcPwrDynRm = 0; state->CcPwrDynRm1 = 0; @@ -502,7 +503,10 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr, 
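/*
 * Aside (not part of the patch): the "kicker" flag threads through this
 * series in one direction, and every consumer shown here branches on it:
 *
 *   struct cgs_firmware_info.is_kicker         (cgs_common.h, above)
 *   smumgr->is_kicker = info.is_kicker         (smu7_upload_smu_firmware_image, below)
 *   DIDTConfig_Polaris11_Kicker selected       (smu7_enable_didt_config, above)
 *   VddcPhase = vddc_phase_shed_control ^ 0x3  (polaris10_populate_ulv_level, this hunk)
 *
 * The matching *_k_smc.bin firmware images are declared with
 * MODULE_FIRMWARE() further down.
 */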
state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); - state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; + if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker) + state->VddcPhase = data->vddc_phase_shed_control ^ 0x3; + else + state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1; CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 6749fbe26c74..35ac27681415 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -533,6 +533,8 @@ int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr) cgs_get_firmware_info(smumgr->device, smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info); + smumgr->is_kicker = info.is_kicker; + result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE); return result; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 60c36928284c..c0956a4207a9 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -37,8 +37,10 @@ MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin"); MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin"); +MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 1051181d8c0d..5a8fa1c85229 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -114,6 +114,7 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; + bool DisableP2A; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 5992ed2166ec..993909430736 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } else *need_post = false; + /* Check P2A Access */ + ast->DisableP2A = true; + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) + ast->DisableP2A = false; + /* Check if we support wide screen */ switch (ast->chip) { case AST1180: @@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ - ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ - ast->support_wide_screen = true; + if (ast->DisableP2A == false) { + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + data = ast_read32(ast, 0x1207c); + data &= 0x300; + if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ + ast->support_wide_screen 
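/*
 * Aside (not part of the patch): the P2A probe above is deliberately
 * simple. If the 0xf004 window reads back as all-ones, the PCI-to-AHB
 * bridge is disabled, ast->DisableP2A stays true, and every SCU/DRAM
 * register access has to give way to fixed defaults (the
 * ast_get_dram_info() hunk that follows falls back to a 16-bit bus,
 * AST_DRAM_1Gx16 and mclk = 396). The probe, quoted from the hunk above:
 *
 *   ast->DisableP2A = true;
 *   data = ast_read32(ast, 0xf004);
 *   if (data != 0xFFFFFFFF)
 *           ast->DisableP2A = false;
 */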
= true; + } } break; } @@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev) uint32_t data, data2; uint32_t denum, num, div, ref_pll; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - - ast_write32(ast, 0x10000, 0xfc600309); - - do { - if (pci_channel_offline(dev->pdev)) - return -EIO; - } while (ast_read32(ast, 0x10000) != 0x01); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) + if (ast->DisableP2A) + { ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + ast->mclk = 396; + } else - ast->dram_bus_width = 32; + { + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + data = ast_read32(ast, 0x10004); + + if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; + + if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (data & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; + break; + default: + case 1: + ast->dram_type = AST_DRAM_1Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (data & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (data & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; + break; + } + } - if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { - case 0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; + data = ast_read32(ast, 0x10120); + data2 = ast_read32(ast, 0x10170); + if (data2 & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = data & 0x1f; + num = (data & 0x3fe0) >> 5; + data = (data & 0xc000) >> 14; + switch (data) { case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (data & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; + div = 0x4; break; - case 8: - if (data & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; + case 2: + case 1: + div = 0x2; break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; + default: + div = 0x1; break; } + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); } - - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 0x10170); - if (data2 & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; - - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { - case 3: - div = 0x4; - break; - case 2: - case 1: - div = 0x2; - break; - default: - div = 0x1; - break; - } - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 810c51d92b99..5331ee1df086 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev) ast_open_key(ast); ast_set_def_ext_reg(dev); - if (ast->chip == AST2300 || ast->chip == AST2400) - ast_init_dram_2300(dev); - else - ast_init_dram_reg(dev); + if (ast->DisableP2A == false) + { + if (ast->chip == AST2300 || ast->chip == AST2400) + ast_init_dram_2300(dev); + else + ast_init_dram_reg(dev); - ast_init_3rdtx(dev); + ast_init_3rdtx(dev); + } + else + { + if (ast->tx_chip_type != AST_TX_NONE) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ + } } /* AST 2300 DRAM settings */ diff 
--git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 5a0c7082c8f8..afec53832145 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -288,15 +288,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, EXPORT_SYMBOL(drm_atomic_get_crtc_state); static void set_out_fence_for_crtc(struct drm_atomic_state *state, - struct drm_crtc *crtc, s64 __user *fence_ptr) + struct drm_crtc *crtc, s32 __user *fence_ptr) { state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; } -static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, +static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc) { - s64 __user *fence_ptr; + s32 __user *fence_ptr; fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; @@ -507,7 +507,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, state->color_mgmt_changed |= replaced; return ret; } else if (property == config->prop_out_fence_ptr) { - s64 __user *fence_ptr = u64_to_user_ptr(val); + s32 __user *fence_ptr = u64_to_user_ptr(val); if (!fence_ptr) return 0; @@ -1914,7 +1914,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb); */ struct drm_out_fence_state { - s64 __user *out_fence_ptr; + s32 __user *out_fence_ptr; struct sync_file *sync_file; int fd; }; @@ -1951,7 +1951,7 @@ static int prepare_crtc_signaling(struct drm_device *dev, return 0; for_each_new_crtc_in_state(state, crtc, crtc_state, i) { - u64 __user *fence_ptr; + s32 __user *fence_ptr; fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); @@ -2031,13 +2031,16 @@ static void complete_crtc_signaling(struct drm_device *dev, } for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct drm_pending_vblank_event *event = crtc_state->event; /* - * TEST_ONLY and PAGE_FLIP_EVENT are mutually - * exclusive, if they weren't, this code should be - * called on success for TEST_ONLY too. + * Free the allocated event. drm_atomic_helper_setup_commit + * can allocate an event too, so only free it if it's ours + * to prevent a double free in drm_atomic_state_clear. 
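 *
 * (Aside, not part of the patch: the s64-to-s32 changes earlier in this
 * file are an ABI correctness fix, not churn. The out-fence property
 * points at a 32-bit fd in userspace, so writing through it as a 64-bit
 * value would clobber four bytes past the caller's variable. Sketched
 * with the corrected type; get_unused_fd_flags() is illustrative here,
 * the hunks themselves only change the pointer type:
 *
 *   s32 __user *fence_ptr = u64_to_user_ptr(val);
 *   int fd = get_unused_fd_flags(O_CLOEXEC);
 *
 *   if (put_user(fd, fence_ptr))   // writes exactly sizeof(s32) bytes
 *           return -EFAULT;
 * )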
*/ - if (crtc_state->event) - drm_event_cancel_free(dev, &crtc_state->event->base); + if (event && (event->base.fence || event->base.file_priv)) { + drm_event_cancel_free(dev, &event->base); + crtc_state->event = NULL; + } } if (!fence_state) diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5d9830f6a190..9203f3e933f7 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -370,7 +370,7 @@ mode_fixup(struct drm_atomic_state *state) struct drm_connector *connector; struct drm_connector_state *conn_state; int i; - bool ret; + int ret; for_each_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->mode_changed && diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index e4d2c8a49076..45464c8b797d 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -378,6 +378,9 @@ int drm_connector_register(struct drm_connector *connector) { int ret = 0; + if (!connector->dev->registered) + return 0; + mutex_lock(&connector->mutex); if (connector->registered) goto unlock; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 45ce224688ce..b5c6bb46a425 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -776,6 +776,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) if (ret) goto err_minors; + dev->registered = true; + if (dev->driver->load) { ret = dev->driver->load(dev, flags); if (ret) @@ -823,6 +825,8 @@ void drm_dev_unregister(struct drm_device *dev) drm_lastclose(dev); + dev->registered = false; + if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_modeset_unregister_all(dev); diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index c91240598471..0dd5da8c55e5 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -858,6 +858,9 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) } fb_helper->fbdev = NULL; + cancel_work_sync(&fb_helper->resume_work); + cancel_work_sync(&fb_helper->dirty_work); + mutex_lock(&kernel_fb_helper_lock); if (!list_empty(&fb_helper->kernel_fb_list)) { list_del(&fb_helper->kernel_fb_list); diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index 3dfe3c886502..308d442a531b 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -137,7 +137,7 @@ EXPORT_SYMBOL(drm_panel_detach); * Return: A pointer to the panel registered for the specified device tree * node or NULL if no panel matching the device tree node can be found. 
*/ -struct drm_panel *of_drm_find_panel(struct device_node *np) +struct drm_panel *of_drm_find_panel(const struct device_node *np) { struct drm_panel *panel; diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index d69af00bdd6a..0fd6f7a18364 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -13,9 +13,11 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/component.h> +#include <linux/mfd/syscon.h> #include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/pm_runtime.h> +#include <linux/regmap.h> #include <video/exynos5433_decon.h> @@ -25,6 +27,9 @@ #include "exynos_drm_plane.h" #include "exynos_drm_iommu.h" +#define DSD_CFG_MUX 0x1004 +#define DSD_CFG_MUX_TE_UNMASK_GLOBAL BIT(13) + #define WINDOWS_NR 3 #define MIN_FB_WIDTH_FOR_16WORD_BURST 128 @@ -57,6 +62,7 @@ struct decon_context { struct exynos_drm_plane planes[WINDOWS_NR]; struct exynos_drm_plane_config configs[WINDOWS_NR]; void __iomem *addr; + struct regmap *sysreg; struct clk *clks[ARRAY_SIZE(decon_clks_name)]; int pipe; unsigned long flags; @@ -118,18 +124,29 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc) static void decon_setup_trigger(struct decon_context *ctx) { - u32 val = !(ctx->out_type & I80_HW_TRG) - ? TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | - TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN - : TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | - TRIGCON_HWTRIGMASK | TRIGCON_HWTRIGEN; - writel(val, ctx->addr + DECON_TRIGCON); + if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))) + return; + + if (!(ctx->out_type & I80_HW_TRG)) { + writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN + | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, + ctx->addr + DECON_TRIGCON); + return; + } + + writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | TRIGCON_HWTRIGMASK + | TRIGCON_HWTRIGEN, ctx->addr + DECON_TRIGCON); + + if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX, + DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0)) + DRM_ERROR("Cannot update sysreg.\n"); } static void decon_commit(struct exynos_drm_crtc *crtc) { struct decon_context *ctx = crtc->ctx; struct drm_display_mode *m = &crtc->base.mode; + bool interlaced = false; u32 val; if (test_bit(BIT_SUSPENDED, &ctx->flags)) @@ -140,13 +157,16 @@ static void decon_commit(struct exynos_drm_crtc *crtc) m->crtc_hsync_end = m->crtc_htotal - 92; m->crtc_vsync_start = m->crtc_vdisplay + 1; m->crtc_vsync_end = m->crtc_vsync_start + 1; + if (m->flags & DRM_MODE_FLAG_INTERLACE) + interlaced = true; } - if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG)) - decon_setup_trigger(ctx); + decon_setup_trigger(ctx); /* lcd on and use command if */ val = VIDOUT_LCD_ON; + if (interlaced) + val |= VIDOUT_INTERLACE_EN_F; if (ctx->out_type & IFTYPE_I80) { val |= VIDOUT_COMMAND_IF; } else { @@ -155,15 +175,21 @@ static void decon_commit(struct exynos_drm_crtc *crtc) writel(val, ctx->addr + DECON_VIDOUTCON0); - val = VIDTCON2_LINEVAL(m->vdisplay - 1) | - VIDTCON2_HOZVAL(m->hdisplay - 1); + if (interlaced) + val = VIDTCON2_LINEVAL(m->vdisplay / 2 - 1) | + VIDTCON2_HOZVAL(m->hdisplay - 1); + else + val = VIDTCON2_LINEVAL(m->vdisplay - 1) | + VIDTCON2_HOZVAL(m->hdisplay - 1); writel(val, ctx->addr + DECON_VIDTCON2); if (!(ctx->out_type & IFTYPE_I80)) { - val = VIDTCON00_VBPD_F( - m->crtc_vtotal - m->crtc_vsync_end - 1) | - VIDTCON00_VFPD_F( - m->crtc_vsync_start - m->crtc_vdisplay - 1); + int vbp = m->crtc_vtotal - m->crtc_vsync_end; + int vfp = m->crtc_vsync_start - m->crtc_vdisplay; + + if 
(interlaced) + vbp = vbp / 2 - 1; + val = VIDTCON00_VBPD_F(vbp - 1) | VIDTCON00_VFPD_F(vfp - 1); writel(val, ctx->addr + DECON_VIDTCON00); val = VIDTCON01_VSPW_F( @@ -278,12 +304,22 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, if (test_bit(BIT_SUSPENDED, &ctx->flags)) return; - val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y); - writel(val, ctx->addr + DECON_VIDOSDxA(win)); + if (crtc->base.mode.flags & DRM_MODE_FLAG_INTERLACE) { + val = COORDINATE_X(state->crtc.x) | + COORDINATE_Y(state->crtc.y / 2); + writel(val, ctx->addr + DECON_VIDOSDxA(win)); + + val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) | + COORDINATE_Y((state->crtc.y + state->crtc.h) / 2 - 1); + writel(val, ctx->addr + DECON_VIDOSDxB(win)); + } else { + val = COORDINATE_X(state->crtc.x) | COORDINATE_Y(state->crtc.y); + writel(val, ctx->addr + DECON_VIDOSDxA(win)); - val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) | - COORDINATE_Y(state->crtc.y + state->crtc.h - 1); - writel(val, ctx->addr + DECON_VIDOSDxB(win)); + val = COORDINATE_X(state->crtc.x + state->crtc.w - 1) | + COORDINATE_Y(state->crtc.y + state->crtc.h - 1); + writel(val, ctx->addr + DECON_VIDOSDxB(win)); + } val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) | VIDOSD_Wx_ALPHA_B_F(0x0); @@ -355,8 +391,6 @@ static void decon_swreset(struct decon_context *ctx) udelay(10); } - WARN(tries == 0, "failed to disable DECON\n"); - writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0); for (tries = 2000; tries; --tries) { if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET) @@ -557,6 +591,13 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) if (val) { writel(val, ctx->addr + DECON_VIDINTCON1); + if (ctx->out_type & IFTYPE_HDMI) { + val = readl(ctx->addr + DECON_VIDOUTCON0); + val &= VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F; + if (val == + (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F)) + return IRQ_HANDLED; + } drm_crtc_handle_vblank(&ctx->crtc->base); } @@ -637,6 +678,15 @@ static int exynos5433_decon_probe(struct platform_device *pdev) ctx->out_type |= IFTYPE_I80; } + if (ctx->out_type | I80_HW_TRG) { + ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, + "samsung,disp-sysreg"); + if (IS_ERR(ctx->sysreg)) { + dev_err(dev, "failed to get system register\n"); + return PTR_ERR(ctx->sysreg); + } + } + for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) { struct clk *clk; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 745cfbdf6b39..a9fa444c6053 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -125,10 +125,8 @@ static struct fimd_driver_data exynos3_fimd_driver_data = { .timing_base = 0x20000, .lcdblk_offset = 0x210, .lcdblk_bypass_shift = 1, - .trg_type = I80_HW_TRG, .has_shadowcon = 1, .has_vidoutcon = 1, - .has_trigger_per_te = 1, }; static struct fimd_driver_data exynos4_fimd_driver_data = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 603d8425cca6..2b8bf2dd6387 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -1683,7 +1683,7 @@ struct platform_driver g2d_driver = { .probe = g2d_probe, .remove = g2d_remove, .driver = { - .name = "s5p-g2d", + .name = "exynos-drm-g2d", .owner = THIS_MODULE, .pm = &g2d_pm_ops, .of_match_table = exynos_g2d_match, diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 752e8a3afc79..0814ed76445c 100644 --- 
a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -35,6 +35,7 @@ #include <linux/io.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_graph.h> #include <linux/hdmi.h> #include <linux/component.h> #include <linux/mfd/syscon.h> @@ -132,6 +133,7 @@ struct hdmi_context { struct regulator_bulk_data regul_bulk[ARRAY_SIZE(supply)]; struct regulator *reg_hdmi_en; struct exynos_drm_clk phy_clk; + struct drm_bridge *bridge; }; static inline struct hdmi_context *encoder_to_hdmi(struct drm_encoder *e) @@ -508,9 +510,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = { { .pixel_clock = 27000000, .conf = { - 0x01, 0x51, 0x22, 0x51, 0x08, 0xfc, 0x88, 0x46, - 0x72, 0x50, 0x24, 0x0c, 0x24, 0x0f, 0x7c, 0xa5, - 0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30, + 0x01, 0x51, 0x2d, 0x75, 0x01, 0x00, 0x88, 0x02, + 0x72, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac, + 0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30, 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40, }, }, @@ -518,9 +520,9 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = { .pixel_clock = 27027000, .conf = { 0x01, 0x51, 0x2d, 0x72, 0x64, 0x09, 0x88, 0xc3, - 0x71, 0x50, 0x24, 0x14, 0x24, 0x0f, 0x7c, 0xa5, - 0xd4, 0x2b, 0x87, 0x00, 0x00, 0x04, 0x00, 0x30, - 0x28, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40, + 0x71, 0x50, 0x44, 0x8c, 0x27, 0x00, 0x7c, 0xac, + 0xd6, 0x2b, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30, + 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40, }, }, { @@ -586,6 +588,15 @@ static const struct hdmiphy_config hdmiphy_5433_configs[] = { 0x08, 0x10, 0x01, 0x01, 0x48, 0x4a, 0x00, 0x40, }, }, + { + .pixel_clock = 297000000, + .conf = { + 0x01, 0x51, 0x3E, 0x05, 0x40, 0xF0, 0x88, 0xC2, + 0x52, 0x53, 0x44, 0x8C, 0x27, 0x00, 0x7C, 0xAC, + 0xD6, 0x2B, 0x67, 0x00, 0x00, 0x04, 0x00, 0x30, + 0x08, 0x10, 0x01, 0x01, 0x48, 0x40, 0x00, 0x40, + }, + }, }; static const char * const hdmi_clk_gates4[] = { @@ -787,7 +798,8 @@ static void hdmi_reg_infoframes(struct hdmi_context *hdata) sizeof(buf)); if (ret > 0) { hdmi_reg_writeb(hdata, HDMI_VSI_CON, HDMI_VSI_CON_EVERY_VSYNC); - hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, ret); + hdmi_reg_write_buf(hdata, HDMI_VSI_HEADER0, buf, 3); + hdmi_reg_write_buf(hdata, HDMI_VSI_DATA(0), buf + 3, ret - 3); } ret = hdmi_audio_infoframe_init(&frm.audio); @@ -911,7 +923,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder) drm_connector_register(connector); drm_mode_connector_attach_encoder(connector, encoder); - return 0; + if (hdata->bridge) { + encoder->bridge = hdata->bridge; + hdata->bridge->encoder = encoder; + ret = drm_bridge_attach(encoder, hdata->bridge, NULL); + if (ret) + DRM_ERROR("Failed to attach bridge\n"); + } + + return ret; } static bool hdmi_mode_fixup(struct drm_encoder *encoder, @@ -1580,6 +1600,31 @@ static void hdmiphy_clk_enable(struct exynos_drm_clk *clk, bool enable) hdmiphy_disable(hdata); } +static int hdmi_bridge_init(struct hdmi_context *hdata) +{ + struct device *dev = hdata->dev; + struct device_node *ep, *np; + + ep = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1); + if (!ep) + return 0; + + np = of_graph_get_remote_port_parent(ep); + of_node_put(ep); + if (!np) { + DRM_ERROR("failed to get remote port parent"); + return -EINVAL; + } + + hdata->bridge = of_drm_find_bridge(np); + of_node_put(np); + + if (!hdata->bridge) + return -EPROBE_DEFER; + + return 0; +} + static int hdmi_resources_init(struct hdmi_context *hdata) { struct device *dev = hdata->dev; @@ -1619,17 +1664,18 @@ static int 
hdmi_resources_init(struct hdmi_context *hdata) hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en"); - if (PTR_ERR(hdata->reg_hdmi_en) == -ENODEV) - return 0; + if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) { + if (IS_ERR(hdata->reg_hdmi_en)) + return PTR_ERR(hdata->reg_hdmi_en); - if (IS_ERR(hdata->reg_hdmi_en)) - return PTR_ERR(hdata->reg_hdmi_en); - - ret = regulator_enable(hdata->reg_hdmi_en); - if (ret) - DRM_ERROR("failed to enable hdmi-en regulator\n"); + ret = regulator_enable(hdata->reg_hdmi_en); + if (ret) { + DRM_ERROR("failed to enable hdmi-en regulator\n"); + return ret; + } + } - return ret; + return hdmi_bridge_init(hdata); } static struct of_device_id hdmi_match_types[] = { diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c index 3194e544ee27..b3d70a63c5a3 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c @@ -72,10 +72,8 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev) return NULL; tcon = devm_kzalloc(dev, sizeof(*tcon), GFP_KERNEL); - if (!tcon) { - ret = -ENOMEM; + if (!tcon) goto err_node_put; - } ret = fsl_tcon_init_regmap(dev, tcon, np); if (ret) { @@ -89,9 +87,13 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev) goto err_node_put; } - of_node_put(np); - clk_prepare_enable(tcon->ipg_clk); + ret = clk_prepare_enable(tcon->ipg_clk); + if (ret) { + dev_err(dev, "Couldn't enable the TCON clock\n"); + goto err_node_put; + } + of_node_put(np); dev_info(dev, "Using TCON in bypass mode\n"); return tcon; diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 7311aeab16f7..3b6caaca9751 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -49,20 +49,21 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) if (high_gm) { node = &vgpu->gm.high_gm_node; size = vgpu_hidden_sz(vgpu); - start = gvt_hidden_gmadr_base(gvt); - end = gvt_hidden_gmadr_end(gvt); + start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE); + end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE); flags = PIN_HIGH; } else { node = &vgpu->gm.low_gm_node; size = vgpu_aperture_sz(vgpu); - start = gvt_aperture_gmadr_base(gvt); - end = gvt_aperture_gmadr_end(gvt); + start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE); + end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE); flags = PIN_MAPPABLE; } mutex_lock(&dev_priv->drm.struct_mutex); ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node, - size, 4096, I915_COLOR_UNEVICTABLE, + size, I915_GTT_PAGE_SIZE, + I915_COLOR_UNEVICTABLE, start, end, flags); mutex_unlock(&dev_priv->drm.struct_mutex); if (ret) @@ -254,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, if (request > avail) goto no_enough_resource; - vgpu_aperture_sz(vgpu) = request; + vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); item = "high GM space"; max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; @@ -265,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, if (request > avail) goto no_enough_resource; - vgpu_hidden_sz(vgpu) = request; + vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE); item = "fence"; max = gvt_fence_sz(gvt) - HOST_FENCE; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 9a4b23c3ee97..b9c8e2407682 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -481,7 +481,6 @@ struct parser_exec_state { (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd 
>> 2) static unsigned long bypass_scan_mask = 0; -static bool bypass_batch_buffer_scan = true; /* ring ALL, type = 0 */ static struct sub_op_bits sub_op_mi[] = { @@ -1135,6 +1134,8 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, u32 dword2 = cmd_val(s, 2); u32 plane = (dword0 & GENMASK(12, 8)) >> 8; + info->plane = PRIMARY_PLANE; + switch (plane) { case MI_DISPLAY_FLIP_SKL_PLANE_1_A: info->pipe = PIPE_A; @@ -1148,12 +1149,28 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, info->pipe = PIPE_C; info->event = PRIMARY_C_FLIP_DONE; break; + + case MI_DISPLAY_FLIP_SKL_PLANE_2_A: + info->pipe = PIPE_A; + info->event = SPRITE_A_FLIP_DONE; + info->plane = SPRITE_PLANE; + break; + case MI_DISPLAY_FLIP_SKL_PLANE_2_B: + info->pipe = PIPE_B; + info->event = SPRITE_B_FLIP_DONE; + info->plane = SPRITE_PLANE; + break; + case MI_DISPLAY_FLIP_SKL_PLANE_2_C: + info->pipe = PIPE_C; + info->event = SPRITE_C_FLIP_DONE; + info->plane = SPRITE_PLANE; + break; + default: gvt_err("unknown plane code %d\n", plane); return -EINVAL; } - info->pipe = PRIMARY_PLANE; info->stride_val = (dword1 & GENMASK(15, 6)) >> 6; info->tile_val = (dword1 & GENMASK(2, 0)); info->surf_val = (dword2 & GENMASK(31, 12)) >> 12; @@ -1525,9 +1542,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) { struct intel_gvt *gvt = s->vgpu->gvt; - if (bypass_batch_buffer_scan) - return 0; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { /* BDW decides privilege based on address space */ if (cmd_val(s, 0) & (1 << 8)) diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index c0c884aeb30e..6d8fde880c39 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -83,7 +83,7 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe) return 0; } -/* EDID with 1024x768 as its resolution */ +/* EDID with 1920x1200 as its resolution */ static unsigned char virtual_dp_monitor_edid[] = { /*Header*/ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, @@ -97,11 +97,16 @@ static unsigned char virtual_dp_monitor_edid[] = { 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, /* Established Timings: maximum resolution is 1024x768 */ 0x21, 0x08, 0x00, - /* Standard Timings. All invalid */ - 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00, - 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, - /* 18 Byte Data Blocks 1: invalid */ - 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0, + /* + * Standard Timings. 
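Besides adding the sprite cases, the MI_DISPLAY_FLIP hunk above fixes a
small slip: the old code assigned PRIMARY_PLANE to info->pipe instead of
info->plane. Hoisting the default ahead of the switch leaves each case
responsible only for what differs, roughly (constants as in the hunk):

    info->plane = PRIMARY_PLANE;            /* default; sprites retag it */

    switch (plane) {
    case MI_DISPLAY_FLIP_SKL_PLANE_1_A:     /* primary: pipe + event */
            info->pipe  = PIPE_A;
            info->event = PRIMARY_A_FLIP_DONE;
            break;
    case MI_DISPLAY_FLIP_SKL_PLANE_2_A:     /* sprite: also switch plane */
            info->pipe  = PIPE_A;
            info->event = SPRITE_A_FLIP_DONE;
            info->plane = SPRITE_PLANE;
            break;
    default:
            return -EINVAL;
    }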
+ * below new resolutions can be supported: + * 1920x1080, 1280x720, 1280x960, 1280x1024, + * 1440x900, 1600x1200, 1680x1050 + */ + 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, + 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, + /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ + 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, /* 18 Byte Data Blocks 2: invalid */ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, @@ -115,7 +120,7 @@ static unsigned char virtual_dp_monitor_edid[] = { /* Extension Block Count */ 0x00, /* Checksum */ - 0xef, + 0x45, }; #define DPCD_HEADER_SIZE 0xb @@ -328,3 +333,15 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu) else return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); } + +/** + * intel_vgpu_reset_display- reset vGPU virtual display emulation + * @vgpu: a vGPU + * + * This function is used to reset vGPU virtual display emulation stuffs + * + */ +void intel_vgpu_reset_display(struct intel_vgpu *vgpu) +{ + emulate_monitor_status_change(vgpu); +} diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index 7a60cb848268..8b234ea961f6 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -158,6 +158,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt); void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); int intel_vgpu_init_display(struct intel_vgpu *vgpu); +void intel_vgpu_reset_display(struct intel_vgpu *vgpu); void intel_vgpu_clean_display(struct intel_vgpu *vgpu); #endif diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f32bb6f6495c..46eb9fd3c03f 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload) #define get_desc_from_elsp_dwords(ed, i) \ ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) - -#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2)) -#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U)) -static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj, - unsigned long add, int gmadr_bytes) -{ - if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) - return -1; - - *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add & - BATCH_BUFFER_ADDR_MASK; - if (gmadr_bytes == 8) { - *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) = - add & BATCH_BUFFER_ADDR_HIGH_MASK; - } - - return 0; -} - static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) { - int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; + const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; + struct intel_shadow_bb_entry *entry_obj; /* pin the gem object to ggtt */ - if (!list_empty(&workload->shadow_bb)) { - struct intel_shadow_bb_entry *entry_obj = - list_first_entry(&workload->shadow_bb, - struct intel_shadow_bb_entry, - list); - struct intel_shadow_bb_entry *temp; + list_for_each_entry(entry_obj, &workload->shadow_bb, list) { + struct i915_vma *vma; - list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, - list) { - struct i915_vma *vma; - - vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, - 4, 0); - if (IS_ERR(vma)) { - gvt_err("Cannot pin\n"); - return; - } - - /* FIXME: we are not tracking our pinned VMA leaving it - * up to the core to fix up the stray pin_count upon - * free. 
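The new Standard Timings bytes above can be checked by hand against the
EDID encoding: horizontal resolution is (byte0 + 31) * 8, the top two bits
of byte1 pick the aspect ratio, and its low six bits store refresh - 60.
A small decoder, written here purely for illustration:

    #include <stdint.h>

    /* Decode one EDID Standard Timing byte pair. */
    static void decode_std_timing(uint8_t b0, uint8_t b1,
                                  unsigned *hres, unsigned *vres, unsigned *hz)
    {
            /* aspect codes 0..3: 16:10, 4:3, 5:4, 16:9 */
            static const unsigned num[4] = { 10, 3, 4,  9 };
            static const unsigned den[4] = { 16, 4, 5, 16 };
            unsigned ar = b1 >> 6;

            *hres = (b0 + 31) * 8;             /* 0xd1 -> (209+31)*8 = 1920 */
            *vres = *hres * num[ar] / den[ar]; /* 0xc0 -> 16:9 -> 1080 */
            *hz   = (b1 & 0x3f) + 60;          /* 0xc0 -> 60 Hz */
    }

Running the pairs through this gives exactly the list in the comment
(0xd1 0xc0 = 1920x1080, 0x95 0x00 = 1440x900, 0xa9 0x40 = 1600x1200, and
so on), and the checksum byte moving to 0x45 simply rebalances the
128-byte block so it again sums to 0 mod 256.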
- */ - - /* update the relocate gma with shadow batch buffer*/ - set_gma_to_bb_cmd(entry_obj, - i915_ggtt_offset(vma), - gmadr_bytes); + vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); + if (IS_ERR(vma)) { + gvt_err("Cannot pin\n"); + return; } + + /* FIXME: we are not tracking our pinned VMA leaving it + * up to the core to fix up the stray pin_count upon + * free. + */ + + /* update the relocate gma with shadow batch buffer*/ + entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); + if (gmadr_bytes == 8) + entry_obj->bb_start_cmd_va[2] = 0; } } @@ -515,7 +487,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { - if (wa_ctx->indirect_ctx.size == 0) + if (!wa_ctx->indirect_ctx.obj) return; i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj); @@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu) INIT_LIST_HEAD(&vgpu->workload_q_head[i]); } - vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", + vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", sizeof(struct intel_vgpu_workload), 0, SLAB_HWCACHE_ALIGN, NULL); diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 2fae2a2ca96f..1cb29b2d7dc6 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -48,31 +48,6 @@ struct gvt_firmware_header { unsigned char data[1]; }; -#define RD(offset) (readl(mmio + offset.reg)) -#define WR(v, offset) (writel(v, mmio + offset.reg)) - -static void bdw_forcewake_get(void __iomem *mmio) -{ - WR(_MASKED_BIT_DISABLE(0xffff), FORCEWAKE_MT); - - RD(ECOBUS); - - if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL) == 0, 50)) - gvt_err("fail to wait forcewake idle\n"); - - WR(_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL), FORCEWAKE_MT); - - if (wait_for((RD(FORCEWAKE_ACK_HSW) & FORCEWAKE_KERNEL), 50)) - gvt_err("fail to wait forcewake ack\n"); - - if (wait_for((RD(GEN6_GT_THREAD_STATUS_REG) & - GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 50)) - gvt_err("fail to wait c0 wake up\n"); -} - -#undef RD -#undef WR - #define dev_to_drm_minor(d) dev_get_drvdata((d)) static ssize_t @@ -91,9 +66,9 @@ static struct bin_attribute firmware_attr = { .mmap = NULL, }; -static int expose_firmware_sysfs(struct intel_gvt *gvt, - void __iomem *mmio) +static int expose_firmware_sysfs(struct intel_gvt *gvt) { + struct drm_i915_private *dev_priv = gvt->dev_priv; struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = gvt->dev_priv->drm.pdev; struct intel_gvt_mmio_info *e; @@ -132,7 +107,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt, for (j = 0; j < e->length; j += 4) *(u32 *)(p + e->offset + j) = - readl(mmio + e->offset + j); + I915_READ_NOTRACE(_MMIO(e->offset + j)); } memcpy(gvt->firmware.mmio, p, info->mmio_size); @@ -235,7 +210,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) struct gvt_firmware_header *h; const struct firmware *fw; char *path; - void __iomem *mmio; void *mem; int ret; @@ -260,17 +234,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) firmware->mmio = mem; - mmio = pci_iomap(pdev, info->mmio_bar, info->mmio_size); - if (!mmio) { - kfree(path); - kfree(firmware->cfg_space); - kfree(firmware->mmio); - return -EINVAL; - } - - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) - bdw_forcewake_get(mmio); - sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, pdev->revision); @@ -300,13 
+263,11 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) release_firmware(fw); firmware->firmware_loaded = true; - pci_iounmap(pdev, mmio); return 0; out_free_fw: release_firmware(fw); expose_firmware: - expose_firmware_sysfs(gvt, mmio); - pci_iounmap(pdev, mmio); + expose_firmware_sysfs(gvt); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 47dec4acf7ff..28c92346db0e 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page( static inline int init_shadow_page(struct intel_vgpu *vgpu, struct intel_vgpu_shadow_page *p, int type) { + struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; + dma_addr_t daddr; + + daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); + if (dma_mapping_error(kdev, daddr)) { + gvt_err("fail to map dma addr\n"); + return -EINVAL; + } + p->vaddr = page_address(p->page); p->type = type; INIT_HLIST_NODE(&p->node); - p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr); - if (p->mfn == INTEL_GVT_INVALID_ADDR) - return -EFAULT; - + p->mfn = daddr >> GTT_PAGE_SHIFT; hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); return 0; } -static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p) +static inline void clean_shadow_page(struct intel_vgpu *vgpu, + struct intel_vgpu_shadow_page *p) { + struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; + + dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096, + PCI_DMA_BIDIRECTIONAL); + if (!hlist_unhashed(&p->node)) hash_del(&p->node); } @@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt) { trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type); - clean_shadow_page(&spt->shadow_page); + clean_shadow_page(spt->vgpu, &spt->shadow_page); intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page); list_del_init(&spt->post_shadow_list); @@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, int page_entry_num = GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; void *scratch_pt; - unsigned long mfn; int i; + struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + dma_addr_t daddr; if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX)) return -EINVAL; @@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, return -ENOMEM; } - mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt); - if (mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt); - free_page((unsigned long)scratch_pt); - return -EFAULT; + daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, + 4096, PCI_DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, daddr)) { + gvt_err("fail to dmamap scratch_pt\n"); + __free_page(virt_to_page(scratch_pt)); + return -ENOMEM; } - gtt->scratch_pt[type].page_mfn = mfn; + gtt->scratch_pt[type].page_mfn = + (unsigned long)(daddr >> GTT_PAGE_SHIFT); gtt->scratch_pt[type].page = virt_to_page(scratch_pt); gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n", - vgpu->id, type, mfn); + vgpu->id, type, gtt->scratch_pt[type].page_mfn); /* Build the tree by full filled the scratch pt with the entries which * point to the next level scratch pt or scratch page. 
The @@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, static int release_scratch_page_tree(struct intel_vgpu *vgpu) { int i; + struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + dma_addr_t daddr; for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { if (vgpu->gtt.scratch_pt[i].page != NULL) { + daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << + GTT_PAGE_SHIFT); + dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); __free_page(vgpu->gtt.scratch_pt[i].page); vgpu->gtt.scratch_pt[i].page = NULL; vgpu->gtt.scratch_pt[i].page_mfn = 0; @@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) { int ret; void *page; + struct device *dev = &gvt->dev_priv->drm.pdev->dev; + dma_addr_t daddr; gvt_dbg_core("init gtt\n"); @@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) gvt_err("fail to allocate scratch ggtt page\n"); return -ENOMEM; } - gvt->gtt.scratch_ggtt_page = virt_to_page(page); - gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page); - if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) { - gvt_err("fail to translate scratch ggtt page\n"); - __free_page(gvt->gtt.scratch_ggtt_page); - return -EFAULT; + daddr = dma_map_page(dev, virt_to_page(page), 0, + 4096, PCI_DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, daddr)) { + gvt_err("fail to dmamap scratch ggtt page\n"); + __free_page(virt_to_page(page)); + return -ENOMEM; } + gvt->gtt.scratch_ggtt_page = virt_to_page(page); + gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT); if (enable_out_of_sync) { ret = setup_spt_oos(gvt); @@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) */ void intel_gvt_clean_gtt(struct intel_gvt *gvt) { + struct device *dev = &gvt->dev_priv->drm.pdev->dev; + dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn << + GTT_PAGE_SHIFT); + + dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); + __free_page(gvt->gtt.scratch_ggtt_page); if (enable_out_of_sync) diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index e6bf5c533fbe..3b9d59e457ba 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -68,8 +68,6 @@ static const struct intel_gvt_ops intel_gvt_ops = { */ int intel_gvt_init_host(void) { - int ret; - if (intel_gvt_host.initialized) return 0; @@ -96,11 +94,6 @@ int intel_gvt_init_host(void) if (!intel_gvt_host.mpt) return -EINVAL; - /* Try to detect if we're running in host instead of VM. */ - ret = intel_gvt_hypervisor_detect_host(); - if (ret) - return -ENODEV; - gvt_dbg_core("Running with hypervisor %s in host mode\n", supported_hypervisors[intel_gvt_host.hypervisor_type]); diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 30e543f5a703..df7f33abd393 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -38,7 +38,6 @@ * both Xen and KVM by providing dedicated hypervisor-related MPT modules. 
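The gtt.c changes above swap the hypervisor virt-to-mfn translation for a
real DMA mapping, reusing the existing mfn fields to hold the mapped
address in page-frame units. The paired map/unmap shape, condensed into
two wrappers (wrapper names are illustrative):

    /* Back a shadow page "mfn" with a DMA mapping instead of asking the
     * hypervisor for a machine frame number. */
    static int map_shadow_page(struct device *kdev, struct page *page,
                               unsigned long *mfn)
    {
            dma_addr_t daddr = dma_map_page(kdev, page, 0, 4096,
                                            PCI_DMA_BIDIRECTIONAL);

            if (dma_mapping_error(kdev, daddr))
                    return -EINVAL;
            *mfn = daddr >> GTT_PAGE_SHIFT;    /* stored in frame units */
            return 0;
    }

    static void unmap_shadow_page(struct device *kdev, unsigned long mfn)
    {
            dma_unmap_page(kdev, (dma_addr_t)mfn << GTT_PAGE_SHIFT, 4096,
                           PCI_DMA_BIDIRECTIONAL);
    }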
*/ struct intel_gvt_mpt { - int (*detect_host)(void); int (*host_init)(struct device *dev, void *gvt, const void *ops); void (*host_exit)(struct device *dev, void *gvt); int (*attach_vgpu)(void *vgpu, unsigned long *handle); diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index f7be02ac4be1..92bb247e3478 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -176,26 +176,15 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu, { struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_irq_ops *ops = gvt->irq.ops; - u32 changed, masked, unmasked; u32 imr = *(u32 *)p_data; - gvt_dbg_irq("write IMR %x with val %x\n", - reg, imr); - - gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg)); - - /* figure out newly masked/unmasked bits */ - changed = vgpu_vreg(vgpu, reg) ^ imr; - masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed; - unmasked = masked ^ changed; - - gvt_dbg_irq("changed %x, masked %x, unmasked %x\n", - changed, masked, unmasked); + gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n", + reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr); vgpu_vreg(vgpu, reg) = imr; ops->check_pending_irq(vgpu); - gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg)); + return 0; } @@ -217,14 +206,11 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, { struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_irq_ops *ops = gvt->irq.ops; - u32 changed, enabled, disabled; u32 ier = *(u32 *)p_data; u32 virtual_ier = vgpu_vreg(vgpu, reg); - gvt_dbg_irq("write master irq reg %x with val %x\n", - reg, ier); - - gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg)); + gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n", + reg, ier, virtual_ier, virtual_ier ^ ier); /* * GEN8_MASTER_IRQ is a special irq register, @@ -236,16 +222,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL; vgpu_vreg(vgpu, reg) |= ier; - /* figure out newly enabled/disable bits */ - changed = virtual_ier ^ ier; - enabled = (virtual_ier & changed) ^ changed; - disabled = enabled ^ changed; - - gvt_dbg_irq("changed %x, enabled %x, disabled %x\n", - changed, enabled, disabled); - ops->check_pending_irq(vgpu); - gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg)); + return 0; } @@ -268,21 +246,11 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_irq_ops *ops = gvt->irq.ops; struct intel_gvt_irq_info *info; - u32 changed, enabled, disabled; u32 ier = *(u32 *)p_data; - gvt_dbg_irq("write IER %x with val %x\n", - reg, ier); - - gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg)); + gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n", + reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier); - /* figure out newly enabled/disable bits */ - changed = vgpu_vreg(vgpu, reg) ^ ier; - enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed; - disabled = enabled ^ changed; - - gvt_dbg_irq("changed %x, enabled %x, disabled %x\n", - changed, enabled, disabled); vgpu_vreg(vgpu, reg) = ier; info = regbase_to_irq_info(gvt, ier_to_regbase(reg)); @@ -293,7 +261,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, update_upstream_irq(vgpu, info); ops->check_pending_irq(vgpu); - gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg)); + return 0; } @@ -317,7 +285,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg, iir_to_regbase(reg)); u32 iir = *(u32 *)p_data; - 
gvt_dbg_irq("write IIR %x with val %x\n", reg, iir); + gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n", + reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir); if (WARN_ON(!info)) return -EINVAL; @@ -619,6 +588,10 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); + + SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); + SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); + SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); } /* GEN8 interrupt PCU events */ diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 0c9234a87a20..0f7f5d97f582 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -77,7 +77,7 @@ struct kvmgt_guest_info { struct gvt_dma { struct rb_node node; gfn_t gfn; - kvm_pfn_t pfn; + unsigned long iova; }; static inline bool handle_valid(unsigned long handle) @@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev); static void intel_vgpu_release_work(struct work_struct *work); static bool kvmgt_guest_exit(struct kvmgt_guest_info *info); +static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn, + unsigned long *iova) +{ + struct page *page; + struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + dma_addr_t daddr; + + page = pfn_to_page(pfn); + if (is_error_page(page)) + return -EFAULT; + + daddr = dma_map_page(dev, page, 0, PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, daddr)) + return -ENOMEM; + + *iova = (unsigned long)(daddr >> PAGE_SHIFT); + return 0; +} + +static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova) +{ + struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; + dma_addr_t daddr; + + daddr = (dma_addr_t)(iova << PAGE_SHIFT); + dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); +} + static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) { struct rb_node *node = vgpu->vdev.cache.rb_node; @@ -111,21 +140,22 @@ out: return ret; } -static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) +static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn) { struct gvt_dma *entry; - kvm_pfn_t pfn; + unsigned long iova; mutex_lock(&vgpu->vdev.cache_lock); entry = __gvt_cache_find(vgpu, gfn); - pfn = (entry == NULL) ? 0 : entry->pfn; + iova = (entry == NULL) ? 
INTEL_GVT_INVALID_ADDR : entry->iova; mutex_unlock(&vgpu->vdev.cache_lock); - return pfn; + return iova; } -static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) +static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, + unsigned long iova) { struct gvt_dma *new, *itr; struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL; @@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn) return; new->gfn = gfn; - new->pfn = pfn; + new->iova = iova; mutex_lock(&vgpu->vdev.cache_lock); while (*link) { @@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn) } g1 = gfn; + gvt_dma_unmap_iova(vgpu, this->iova); rc = vfio_unpin_pages(dev, &g1, 1); WARN_ON(rc != 1); __gvt_cache_remove_entry(vgpu, this); @@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) mutex_lock(&vgpu->vdev.cache_lock); while ((node = rb_first(&vgpu->vdev.cache))) { dma = rb_entry(node, struct gvt_dma, node); + gvt_dma_unmap_iova(vgpu, dma->iova); gfn = dma->gfn; vfio_unpin_pages(dev, &gfn, 1); @@ -230,8 +262,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, return NULL; } -static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, - char *buf) +static ssize_t available_instances_show(struct kobject *kobj, + struct device *dev, char *buf) { struct intel_vgpu_type *type; unsigned int num = 0; @@ -269,12 +301,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, type->fence); } -static MDEV_TYPE_ATTR_RO(available_instance); +static MDEV_TYPE_ATTR_RO(available_instances); static MDEV_TYPE_ATTR_RO(device_api); static MDEV_TYPE_ATTR_RO(description); static struct attribute *type_attrs[] = { - &mdev_type_attr_available_instance.attr, + &mdev_type_attr_available_instances.attr, &mdev_type_attr_device_api.attr, &mdev_type_attr_description.attr, NULL, @@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, sparse->areas[0].offset = PAGE_ALIGN(vgpu_aperture_offset(vgpu)); sparse->areas[0].size = vgpu_aperture_sz(vgpu); - if (!caps.buf) { - kfree(caps.buf); - caps.buf = NULL; - caps.size = 0; - } break; case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX: @@ -1248,43 +1275,6 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm, spin_unlock(&kvm->mmu_lock); } -static bool kvmgt_check_guest(void) -{ - unsigned int eax, ebx, ecx, edx; - char s[12]; - unsigned int *i; - - eax = KVM_CPUID_SIGNATURE; - ebx = ecx = edx = 0; - - asm volatile ("cpuid" - : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) - : - : "cc", "memory"); - i = (unsigned int *)s; - i[0] = ebx; - i[1] = ecx; - i[2] = edx; - - return !strncmp(s, "KVMKVMKVM", strlen("KVMKVMKVM")); -} - -/** - * NOTE: - * It's actually impossible to check if we are running in KVM host, - * since the "KVM host" is simply native. So we only dectect guest here. - */ -static int kvmgt_detect_host(void) -{ -#ifdef CONFIG_INTEL_IOMMU - if (intel_iommu_gfx_mapped) { - gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n"); - return -ENODEV; - } -#endif - return kvmgt_check_guest() ? 
-ENODEV : 0; -} - static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm) { struct intel_vgpu *itr; @@ -1390,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data) static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) { - unsigned long pfn; + unsigned long iova, pfn; struct kvmgt_guest_info *info; struct device *dev; int rc; @@ -1399,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) return INTEL_GVT_INVALID_ADDR; info = (struct kvmgt_guest_info *)handle; - pfn = gvt_cache_find(info->vgpu, gfn); - if (pfn != 0) - return pfn; + iova = gvt_cache_find(info->vgpu, gfn); + if (iova != INTEL_GVT_INVALID_ADDR) + return iova; pfn = INTEL_GVT_INVALID_ADDR; dev = mdev_dev(info->vgpu->vdev.mdev); @@ -1410,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); return INTEL_GVT_INVALID_ADDR; } + /* transfer to host iova for GFX to use DMA */ + rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); + if (rc) { + gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); + vfio_unpin_pages(dev, &gfn, 1); + return INTEL_GVT_INVALID_ADDR; + } - gvt_cache_add(info->vgpu, gfn, pfn); - return pfn; + gvt_cache_add(info->vgpu, gfn, iova); + return iova; } static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, @@ -1459,7 +1456,6 @@ static unsigned long kvmgt_virt_to_pfn(void *addr) } struct intel_gvt_mpt kvmgt_mpt = { - .detect_host = kvmgt_detect_host, .host_init = kvmgt_host_init, .host_exit = kvmgt_host_exit, .attach_vgpu = kvmgt_attach_vgpu, diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 1af5830c0a56..419353624c5a 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -44,18 +44,6 @@ */ /** - * intel_gvt_hypervisor_detect_host - check if GVT-g is running within - * hypervisor host/privilged domain - * - * Returns: - * Zero on success, -ENODEV if current kernel is running inside a VM - */ -static inline int intel_gvt_hypervisor_detect_host(void) -{ - return intel_gvt_host.mpt->detect_host(); -} - -/** * intel_gvt_hypervisor_host_init - init GVT-g host side * * Returns: diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 44136b1f3aab..2b3a642284b6 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -236,12 +236,18 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id) } } +#define CTX_CONTEXT_CONTROL_VAL 0x03 + void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct render_mmio *mmio; u32 v; int i, array_size; + u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state; + u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL]; + u32 inhibit_mask = + _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { mmio = gen9_render_mmio_list; @@ -257,6 +263,17 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) continue; mmio->value = I915_READ(mmio->reg); + + /* + * if it is an inhibit context, load in_context mmio + * into HW by mmio write. If it is not, skip this mmio + * write. 
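The kvmgt_gfn_to_pfn rework above turns the per-gfn cache into a cache of
host DMA addresses: translate once on a miss, remember the result, and
undo the pin if the mapping step fails. The lookup path condensed (names
from the hunk, argument lists abridged):

    iova = gvt_cache_find(info->vgpu, gfn);
    if (iova != INTEL_GVT_INVALID_ADDR)
            return iova;                    /* hit: already pinned+mapped */

    /* miss: pin the guest page, then map it for device DMA */
    vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn);
    if (gvt_dma_map_iova(info->vgpu, pfn, &iova)) {
            vfio_unpin_pages(dev, &gfn, 1); /* unwind the pin on failure */
            return INTEL_GVT_INVALID_ADDR;
    }
    gvt_cache_add(info->vgpu, gfn, iova);
    return iova;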
+ */ + if (mmio->in_context && + ((ctx_ctrl & inhibit_mask) != inhibit_mask) && + i915.enable_execlists) + continue; + if (mmio->mask) v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16); else diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 678b0be85376..06c9584ac5f0 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -125,7 +125,6 @@ static void tbs_sched_func(struct work_struct *work) vgpu_data = scheduler->current_vgpu->sched_data; head = &vgpu_data->list; } else { - gvt_dbg_sched("no current vgpu search from q head\n"); head = &sched_data->runq_head; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 7ea68a75dc46..d6b6d0efdd1a 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -169,7 +169,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", ring_id, workload); - shadow_ctx->desc_template = workload->ctx_desc.addressing_mode << + shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); + shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; mutex_lock(&dev_priv->drm.struct_mutex); @@ -456,7 +457,7 @@ static int workload_thread(void *priv) } complete: - gvt_dbg_sched("will complete workload %p\n, status: %d\n", + gvt_dbg_sched("will complete workload %p, status: %d\n", workload, workload->status); if (workload->req) diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 3b30c28bff51..2833dfa8c9ae 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -113,7 +113,7 @@ struct intel_shadow_bb_entry { struct drm_i915_gem_object *obj; void *va; unsigned long len; - void *bb_start_cmd_va; + u32 *bb_start_cmd_va; }; #define workload_q_head(vgpu, ring_id) \ diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 7295bc8e12fb..95a97aa0051e 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -74,7 +74,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) { unsigned int num_types; - unsigned int i, low_avail; + unsigned int i, low_avail, high_avail; unsigned int min_low; /* vGPU type name is defined as GVTg_Vx_y which contains @@ -89,9 +89,9 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) * to indicate how many vGPU instance can be created for this * type. * - * Currently use static size here as we init type earlier.. */ - low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE; + low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; + high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; num_types = 4; gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), @@ -106,7 +106,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) gvt->types[i].low_gm_size = min_low; gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); gvt->types[i].fence = 4; - gvt->types[i].max_instance = low_avail / min_low; + gvt->types[i].max_instance = min(low_avail / min_low, + high_avail / gvt->types[i].high_gm_size); gvt->types[i].avail_instance = gvt->types[i].max_instance; if (IS_GEN8(gvt->dev_priv)) @@ -142,9 +143,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) /* Need to depend on maxium hw resource size but keep on * static config for now. 
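The new skip condition above is easier to read as a positive predicate:
an in-context register normally reaches the hardware through the saved
context image, so a direct MMIO write is only needed when the context has
CTX_RESTORE_INHIBIT set. A sketch (helper is illustrative, and the
i915.enable_execlists leg is elided):

    static bool needs_mmio_write(const struct render_mmio *mmio, u32 ctx_ctrl)
    {
            u32 inhibit = _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

            /* non-context registers always need the write; in-context
             * ones only when the image restore is inhibited */
            return !mmio->in_context || (ctx_ctrl & inhibit) == inhibit;
    }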
*/ - low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE - + low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE - gvt->gm.vgpu_allocated_low_gm_size; - high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE - + high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE - gvt->gm.vgpu_allocated_high_gm_size; fence_avail = gvt_fence_sz(gvt) - HOST_FENCE - gvt->fence.vgpu_allocated_fence_num; @@ -384,6 +385,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_vgpu_reset_resource(vgpu); intel_vgpu_reset_mmio(vgpu); populate_pvinfo_page(vgpu); + intel_vgpu_reset_display(vgpu); if (dmlr) intel_vgpu_reset_cfg_space(vgpu); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 4ae69ebe166e..f6017f2cfb86 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -213,7 +213,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv) } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_KBP; DRM_DEBUG_KMS("Found KabyPoint PCH\n"); - WARN_ON(!IS_KABYLAKE(dev_priv)); + WARN_ON(!IS_SKYLAKE(dev_priv) && + !IS_KABYLAKE(dev_priv)); } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && @@ -824,10 +825,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, if (ret < 0) return ret; - ret = intel_gvt_init(dev_priv); - if (ret < 0) - goto err_workqueues; - /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); @@ -841,7 +838,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, intel_init_audio_hooks(dev_priv); ret = i915_gem_load_init(dev_priv); if (ret < 0) - goto err_gvt; + goto err_workqueues; intel_display_crc_init(dev_priv); @@ -853,8 +850,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, return 0; -err_gvt: - intel_gvt_cleanup(dev_priv); err_workqueues: i915_workqueues_cleanup(dev_priv); return ret; @@ -1077,6 +1072,10 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("can't enable MSI"); } + ret = intel_gvt_init(dev_priv); + if (ret) + goto out_ggtt; + return 0; out_ggtt: @@ -1290,6 +1289,8 @@ void i915_driver_unload(struct drm_device *dev) intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); + intel_gvt_cleanup(dev_priv); + i915_driver_unregister(dev_priv); drm_vblank_cleanup(dev); @@ -2377,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev) assert_forcewakes_inactive(dev_priv); - if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) intel_hpd_poll_init(dev_priv); DRM_DEBUG_KMS("Device suspended\n"); @@ -2426,6 +2427,7 @@ static int intel_runtime_resume(struct device *kdev) * we can do is to hope that things will still work (and disable RPM). 
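The runtime-suspend change above from || to && is a De Morgan fix worth
spelling out: the intent is "on neither VLV nor CHV", and !(A || B) is
!A && !B, whereas the old !A || !B is true on every platform because no
device is both at once:

    /* run HPD polling setup only when the platform is neither VLV nor CHV */
    if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
            intel_hpd_poll_init(dev_priv);  /* == !(VLV || CHV) */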
*/ i915_gem_init_swizzling(dev_priv); + i915_gem_restore_fences(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 244628065f94..e44c598ecb82 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2242,6 +2242,11 @@ struct drm_i915_private { struct i915_frontbuffer_tracking fb_tracking; + struct intel_atomic_helper { + struct llist_head free_list; + struct work_struct free_work; + } atomic_helper; + u16 orig_clock; bool mchbar_need_disable; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c8689892a89f..88f3628b4e29 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -440,7 +440,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, timeout = i915_gem_object_wait_fence(shared[i], flags, timeout, rps); - if (timeout <= 0) + if (timeout < 0) break; dma_fence_put(shared[i]); @@ -453,7 +453,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv, excl = reservation_object_get_excl_rcu(resv); } - if (excl && timeout > 0) + if (excl && timeout >= 0) timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps); dma_fence_put(excl); @@ -2009,8 +2009,16 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv) for (i = 0; i < dev_priv->num_fence_regs; i++) { struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; - if (WARN_ON(reg->pin_count)) - continue; + /* Ideally we want to assert that the fence register is not + * live at this point (i.e. that no piece of code will be + * trying to write through fence + GTT, as that both violates + * our tracking of activity and associated locking/barriers, + * but also is illegal given that the hw is powered down). + * + * Previously we used reg->pin_count as a "liveness" indicator. + * That is not sufficient, and we need a more fine-grained + * tool if we want to have a sanity check here. + */ if (!reg->vma) continue; @@ -2735,21 +2743,17 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) engine->irq_seqno_barrier(engine); request = i915_gem_find_active_request(engine); - if (!request) - return; + if (request && i915_gem_reset_request(request)) { + DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", + engine->name, request->global_seqno); - if (!i915_gem_reset_request(request)) - return; - - DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n", - engine->name, request->global_seqno); + /* If this context is now banned, skip all pending requests. */ + if (i915_gem_context_is_banned(request->ctx)) + engine_skip_context(request); + } /* Setup the CS to resume from the breadcrumb of the hung request */ engine->reset_hw(engine, request); - - /* If this context is now banned, skip all of its pending requests. 
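The i915_gem_reset_engine rework above replaces two early returns with a
single guarded block, so engine->reset_hw() now runs even when no guilty
request is found. Condensed control flow after the change:

    request = i915_gem_find_active_request(engine);
    if (request && i915_gem_reset_request(request)) {
            /* guilty hang: if the context got banned, drop the rest of
             * its queued requests before restarting the engine */
            if (i915_gem_context_is_banned(request->ctx))
                    engine_skip_context(request);
    }
    engine->reset_hw(engine, request);      /* unconditional now */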
*/ - if (i915_gem_context_is_banned(request->ctx)) - engine_skip_context(request); } void i915_gem_reset_finish(struct drm_i915_private *dev_priv) @@ -3517,7 +3521,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, vma->display_alignment = max_t(u64, vma->display_alignment, alignment); /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */ - if (obj->cache_dirty) { + if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) { i915_gem_clflush_object(obj, true); intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 57bec08e80c5..d02cfaefe1c8 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1180,14 +1180,14 @@ validate_exec_list(struct drm_device *dev, if (exec[i].offset != gen8_canonical_addr(exec[i].offset & PAGE_MASK)) return -EINVAL; - - /* From drm_mm perspective address space is continuous, - * so from this point we're always using non-canonical - * form internally. - */ - exec[i].offset = gen8_noncanonical_addr(exec[i].offset); } + /* From drm_mm perspective address space is continuous, + * so from this point we're always using non-canonical + * form internally. + */ + exec[i].offset = gen8_noncanonical_addr(exec[i].offset); + if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 30d8dbd04f0b..2801a4d56324 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -755,9 +755,10 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm, GEM_BUG_ON(pte_end > GEN8_PTES); bitmap_clear(pt->used_ptes, pte, num_entries); - - if (bitmap_empty(pt->used_ptes, GEN8_PTES)) - return true; + if (USES_FULL_PPGTT(vm->i915)) { + if (bitmap_empty(pt->used_ptes, GEN8_PTES)) + return true; + } pt_vaddr = kmap_px(pt); diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c index 17ce53d0d092..933019e1b206 100644 --- a/drivers/gpu/drm/i915/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/i915_gem_internal.c @@ -46,16 +46,39 @@ static struct sg_table * i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); - unsigned int npages = obj->base.size / PAGE_SIZE; struct sg_table *st; struct scatterlist *sg; + unsigned int npages; int max_order; gfp_t gfp; + max_order = MAX_ORDER; +#ifdef CONFIG_SWIOTLB + if (swiotlb_nr_tbl()) { + unsigned int max_segment; + + max_segment = swiotlb_max_segment(); + if (max_segment) { + max_segment = max_t(unsigned int, max_segment, + PAGE_SIZE) >> PAGE_SHIFT; + max_order = min(max_order, ilog2(max_segment)); + } + } +#endif + + gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; + if (IS_I965GM(i915) || IS_I965G(i915)) { + /* 965gm cannot relocate objects above 4GiB. 
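The execbuffer fix above moves the non-canonical conversion out of the
pinned-offset branch so that every incoming offset is normalized, since
later bookkeeping compares against drm_mm's flat 48-bit form. The two
helpers involved reduce to sign-extending bit 47, shown here as an
illustrative reimplementation rather than a copy from the tree:

    #include <stdint.h>

    /* "Canonical" GEN8 addresses mirror bit 47 into bits 63:48. */
    static inline uint64_t canonical_addr(uint64_t addr)
    {
            return (uint64_t)((int64_t)(addr << 16) >> 16);
    }

    /* drm_mm wants the flat form with the sign extension stripped. */
    static inline uint64_t noncanonical_addr(uint64_t addr)
    {
            return addr & ((UINT64_C(1) << 48) - 1);
    }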
*/ + gfp &= ~__GFP_HIGHMEM; + gfp |= __GFP_DMA32; + } + +create_st: st = kmalloc(sizeof(*st), GFP_KERNEL); if (!st) return ERR_PTR(-ENOMEM); + npages = obj->base.size / PAGE_SIZE; if (sg_alloc_table(st, npages, GFP_KERNEL)) { kfree(st); return ERR_PTR(-ENOMEM); @@ -64,19 +87,6 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) sg = st->sgl; st->nents = 0; - max_order = MAX_ORDER; -#ifdef CONFIG_SWIOTLB - if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */ - max_order = min(max_order, ilog2(IO_TLB_SEGPAGES)); -#endif - - gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; - if (IS_I965GM(i915) || IS_I965G(i915)) { - /* 965gm cannot relocate objects above 4GiB. */ - gfp &= ~__GFP_HIGHMEM; - gfp |= __GFP_DMA32; - } - do { int order = min(fls(npages) - 1, max_order); struct page *page; @@ -104,8 +114,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) sg = __sg_next(sg); } while (1); - if (i915_gem_gtt_prepare_pages(obj, st)) + if (i915_gem_gtt_prepare_pages(obj, st)) { + /* Failed to dma-map try again with single page sg segments */ + if (get_order(st->sgl->length)) { + internal_free_pages(st); + max_order = 0; + goto create_st; + } goto err; + } /* Mark the pages as dontneed whilst they are still pinned. As soon * as they are unpinned they are allowed to be reaped by the shrinker, diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c index 72b7f7d9461d..f31deeb72703 100644 --- a/drivers/gpu/drm/i915/i915_gem_request.c +++ b/drivers/gpu/drm/i915/i915_gem_request.c @@ -1025,8 +1025,13 @@ __i915_request_wait_for_execute(struct drm_i915_gem_request *request, break; } + if (!timeout) { + timeout = -ETIME; + break; + } + timeout = io_schedule_timeout(timeout); - } while (timeout); + } while (1); finish_wait(&request->execute.wait, &wait); if (flags & I915_WAIT_LOCKED) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index ec7c5d80fe4f..9673bcc3b6ad 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -405,6 +405,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->mm.stolen_lock); + if (intel_vgpu_active(dev_priv)) { + DRM_INFO("iGVT-g active, disabling use of stolen memory\n"); + return 0; + } + #ifdef CONFIG_INTEL_IOMMU if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) { DRM_INFO("DMAR active, disabling use of stolen memory\n"); diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index b1361cfd4c5c..974ac08df473 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -173,7 +173,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj, else tile_width = 512; - if (!IS_ALIGNED(stride, tile_width)) + if (!stride || !IS_ALIGNED(stride, tile_width)) return false; /* 965+ just needs multiples of tile width */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ee313247673b..53bb7de6020d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -3123,19 +3123,16 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } -static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) { - u32 hotplug_irqs, hotplug, enabled_irqs; - - hotplug_irqs = SDE_HOTPLUG_MASK_SPT; - enabled_irqs = 
intel_hpd_enabled_irqs(dev_priv, hpd_spt); - - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + u32 hotplug; /* Enable digital hotplug on the PCH */ hotplug = I915_READ(PCH_PORT_HOTPLUG); - hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | - PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; + hotplug |= PORTA_HOTPLUG_ENABLE | + PORTB_HOTPLUG_ENABLE | + PORTC_HOTPLUG_ENABLE | + PORTD_HOTPLUG_ENABLE; I915_WRITE(PCH_PORT_HOTPLUG, hotplug); hotplug = I915_READ(PCH_PORT_HOTPLUG2); @@ -3143,6 +3140,18 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); } +static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + hotplug_irqs = SDE_HOTPLUG_MASK_SPT; + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); + + ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + + spt_hpd_detection_setup(dev_priv); +} + static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 hotplug_irqs, hotplug, enabled_irqs; @@ -3177,18 +3186,15 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_hpd_irq_setup(dev_priv); } -static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, + u32 enabled_irqs) { - u32 hotplug_irqs, hotplug, enabled_irqs; - - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); - hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; - - bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + u32 hotplug; hotplug = I915_READ(PCH_PORT_HOTPLUG); - hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | - PORTA_HOTPLUG_ENABLE; + hotplug |= PORTA_HOTPLUG_ENABLE | + PORTB_HOTPLUG_ENABLE | + PORTC_HOTPLUG_ENABLE; DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", hotplug, enabled_irqs); @@ -3198,7 +3204,6 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) * For BXT invert bit has to be set based on AOB design * for HPD detection logic, update it based on VBT fields. 
*/ - if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) hotplug |= BXT_DDIA_HPD_INVERT; @@ -3212,6 +3217,23 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } +static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) +{ + __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); +} + +static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) +{ + u32 hotplug_irqs, enabled_irqs; + + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); + hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; + + bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + + __bxt_hpd_detection_setup(dev_priv, enabled_irqs); +} + static void ibx_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -3227,6 +3249,12 @@ static void ibx_irq_postinstall(struct drm_device *dev) gen5_assert_iir_is_zero(dev_priv, SDEIIR); I915_WRITE(SDEIMR, ~mask); + + if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || + HAS_PCH_LPT(dev_priv)) + ; /* TODO: Enable HPD detection on older PCH platforms too */ + else + spt_hpd_detection_setup(dev_priv); } static void gen5_gt_irq_postinstall(struct drm_device *dev) @@ -3438,6 +3466,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); + + if (IS_GEN9_LP(dev_priv)) + bxt_hpd_detection_setup(dev_priv); } static int gen8_irq_postinstall(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 72f9f36ae5ce..675323189f2c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3307,8 +3307,10 @@ enum skl_disp_power_wells { /* * Logical Context regs */ -#define CCID _MMIO(0x2180) -#define CCID_EN (1<<0) +#define CCID _MMIO(0x2180) +#define CCID_EN BIT(0) +#define CCID_EXTENDED_STATE_RESTORE BIT(2) +#define CCID_EXTENDED_STATE_SAVE BIT(3) /* * Notes on SNB/IVB/VLV context size: * - Power context is saved elsewhere (LLC or stolen) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 385e29af8baa..2bf5aca6e37c 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev); struct edid *edid; struct i2c_adapter *i2c; + bool ret = false; BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); @@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) */ if (!is_digital) { DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); - return true; + ret = true; + } else { + DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } - - DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n"); } else { DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n"); } kfree(edid); - return false; + return ret; } static enum drm_connector_status diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 36ecc864e711..a2fece5e9fb3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2578,8 +2578,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, * We only keep the x/y offsets, so push all of the * gtt offset into the x/y offsets. 
*/ - _intel_adjust_tile_offset(&x, &y, tile_size, - tile_width, tile_height, pitch_tiles, + _intel_adjust_tile_offset(&x, &y, + tile_width, tile_height, + tile_size, pitch_tiles, gtt_offset_rotated * tile_size, 0); gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; @@ -4253,10 +4254,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc) drm_crtc_vblank_put(&intel_crtc->base); wake_up_all(&dev_priv->pending_flip_queue); - queue_work(dev_priv->wq, &work->unpin_work); - trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); + + queue_work(dev_priv->wq, &work->unpin_work); } static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) @@ -6882,6 +6883,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) } state = drm_atomic_state_alloc(crtc->dev); + if (!state) { + DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", + crtc->base.id, crtc->name); + return; + } + state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; /* Everything's already locked, -EDEADLK can't happen. */ @@ -14563,8 +14570,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence, break; case FENCE_FREE: - drm_atomic_state_put(&state->base); - break; + { + struct intel_atomic_helper *helper = + &to_i915(state->base.dev)->atomic_helper; + + if (llist_add(&state->freed, &helper->free_list)) + schedule_work(&helper->free_work); + break; + } } return NOTIFY_DONE; @@ -16587,6 +16600,18 @@ fail: drm_modeset_acquire_fini(&ctx); } +static void intel_atomic_helper_free_state(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), atomic_helper.free_work); + struct intel_atomic_state *state, *next; + struct llist_node *freed; + + freed = llist_del_all(&dev_priv->atomic_helper.free_list); + llist_for_each_entry_safe(state, next, freed, freed) + drm_atomic_state_put(&state->base); +} + int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -16606,6 +16631,9 @@ int intel_modeset_init(struct drm_device *dev) dev->mode_config.funcs = &intel_mode_funcs; + INIT_WORK(&dev_priv->atomic_helper.free_work, + intel_atomic_helper_free_state); + intel_init_quirks(dev); intel_init_pm(dev_priv); @@ -17263,6 +17291,9 @@ void intel_modeset_cleanup(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); + flush_work(&dev_priv->atomic_helper.free_work); + WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); + intel_disable_gt_powersave(dev_priv); /* diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3d8ac8aa7214..d1670b8afbf5 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -2887,6 +2887,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) WARN_ON(intel_dp->active_pipe != INVALID_PIPE); + if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) + return; + edp_panel_vdd_off_sync(intel_dp); /* @@ -2914,9 +2917,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev, lockdep_assert_held(&dev_priv->pps_mutex); - if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B)) - return; - for_each_intel_encoder(dev, encoder) { struct intel_dp *intel_dp; enum port port; @@ -4406,8 +4406,8 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, * * Return %true if @port is connected, %false otherwise. 
*/ -static bool intel_digital_port_connected(struct drm_i915_private *dev_priv, - struct intel_digital_port *port) +bool intel_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port) { if (HAS_PCH_IBX(dev_priv)) return ibx_digital_port_connected(dev_priv, port); diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index c92a2558beb4..e59e43a9f3a6 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -1855,7 +1855,8 @@ bxt_get_dpll(struct intel_crtc *crtc, return NULL; if ((encoder->type == INTEL_OUTPUT_DP || - encoder->type == INTEL_OUTPUT_EDP) && + encoder->type == INTEL_OUTPUT_EDP || + encoder->type == INTEL_OUTPUT_DP_MST) && !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state)) return NULL; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0cec0013ace0..40fed65a791d 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -371,6 +371,8 @@ struct intel_atomic_state { struct skl_wm_values wm_results; struct i915_sw_fence commit_ready; + + struct llist_node freed; }; struct intel_plane_state { @@ -1485,6 +1487,8 @@ bool __intel_dp_read_desc(struct intel_dp *intel_dp, bool intel_dp_read_desc(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int intel_dp_max_data_rate(int max_link_clock, int max_lanes); +bool intel_digital_port_connected(struct drm_i915_private *dev_priv, + struct intel_digital_port *port); /* intel_dp_aux_backlight.c */ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector); diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 290384e86c63..d23c0fcff751 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -67,6 +67,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) return 0; } + if (intel_vgpu_active(dev_priv)) { + DRM_DEBUG_DRIVER("GVT-g is disabled for guest\n"); + goto bail; + } + if (!is_supported_device(dev_priv)) { DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n"); goto bail; diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 432ee495dec2..ebf8023d21e6 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -360,7 +360,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) static u64 execlists_update_context(struct drm_i915_gem_request *rq) { struct intel_context *ce = &rq->ctx->engine[rq->engine->id]; - struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; + struct i915_hw_ppgtt *ppgtt = + rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt; u32 *reg_state = ce->lrc_reg_state; reg_state[CTX_RING_TAIL+1] = rq->tail; @@ -1389,7 +1390,20 @@ static void reset_common_ring(struct intel_engine_cs *engine, { struct drm_i915_private *dev_priv = engine->i915; struct execlist_port *port = engine->execlist_port; - struct intel_context *ce = &request->ctx->engine[engine->id]; + struct intel_context *ce; + + /* If the request was innocent, we leave the request in the ELSP + * and will try to replay it on restarting. The context image may + * have been corrupted by the reset, in which case we may have + * to service a new GPU hang, but more likely we can continue on + * without impact. 
+ * + * If the request was guilty, we presume the context is corrupt + * and have to at least restore the RING register in the context + * image back to the expected values to skip over the guilty request. + */ + if (!request || request->fence.error != -EIO) + return; /* We want a simple context + ring to execute the breadcrumb update. * We cannot rely on the context being intact across the GPU hang, @@ -1398,6 +1412,7 @@ static void reset_common_ring(struct intel_engine_cs *engine, * future request will be after userspace has had the opportunity * to recreate its own state. */ + ce = &request->ctx->engine[engine->id]; execlists_init_reg_state(ce->lrc_reg_state, request->ctx, engine, ce->ring); diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index f6d4e6940257..c300647ef604 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c @@ -158,6 +158,8 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); unsigned long start = jiffies; if (!lspcon->desc_valid) @@ -173,7 +175,8 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) if (!__intel_dp_read_desc(intel_dp, &desc)) return; - if (!memcmp(&intel_dp->desc, &desc, sizeof(desc))) { + if (intel_digital_port_connected(dev_priv, dig_port) && + !memcmp(&intel_dp->desc, &desc, sizeof(desc))) { DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n", jiffies_to_msecs(jiffies - start)); return; diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index f4429f67a4e3..4a862a358c70 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -982,7 +982,18 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) opregion->vbt_size = vbt_size; } else { vbt = base + OPREGION_VBT_OFFSET; - vbt_size = OPREGION_ASLE_EXT_OFFSET - OPREGION_VBT_OFFSET; + /* + * The VBT specification says that if the ASLE ext + * mailbox is not used its area is reserved, but + * on some CHT boards the VBT extends into the + * ASLE ext area. Allow this even though it is + * against the spec, so we do not end up rejecting + * the VBT on those boards (and end up not finding the + * LCD panel because of this). + */ + vbt_size = (mboxes & MBOX_ASLE_EXT) ? + OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE; + vbt_size -= OPREGION_VBT_OFFSET; if (intel_bios_is_valid_vbt(vbt, vbt_size)) { DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n"); opregion->vbt = vbt; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 69035e4f9b3b..91bc4abf5d3e 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -599,10 +599,62 @@ out: static void reset_ring_common(struct intel_engine_cs *engine, struct drm_i915_gem_request *request) { - struct intel_ring *ring = request->ring; + /* Try to restore the logical GPU state to match the continuation + * of the request queue. If we skip the context/PD restore, then + * the next request may try to execute assuming that its context + * is valid and loaded on the GPU and so may try to access invalid + * memory, prompting repeated GPU hangs. 
+ * + * If the request was guilty, we still restore the logical state + * in case the next request requires it (e.g. the aliasing ppgtt), + * but skip over the hung batch. + * + * If the request was innocent, we try to replay the request with + * the restored context. + */ + if (request) { + struct drm_i915_private *dev_priv = request->i915; + struct intel_context *ce = &request->ctx->engine[engine->id]; + struct i915_hw_ppgtt *ppgtt; + + /* FIXME consider gen8 reset */ + + if (ce->state) { + I915_WRITE(CCID, + i915_ggtt_offset(ce->state) | + BIT(8) /* must be set! */ | + CCID_EXTENDED_STATE_SAVE | + CCID_EXTENDED_STATE_RESTORE | + CCID_EN); + } - ring->head = request->postfix; - ring->last_retired_head = -1; + ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt; + if (ppgtt) { + u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10; + + I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); + I915_WRITE(RING_PP_DIR_BASE(engine), pd_offset); + + /* Wait for the PD reload to complete */ + if (intel_wait_for_register(dev_priv, + RING_PP_DIR_BASE(engine), + BIT(0), 0, + 10)) + DRM_ERROR("Wait for reload of ppgtt page-directory timed out\n"); + + ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); + } + + /* If the rq hung, jump to its breadcrumb and skip the batch */ + if (request->fence.error == -EIO) { + struct intel_ring *ring = request->ring; + + ring->head = request->postfix; + ring->last_retired_head = -1; + } + } else { + engine->legacy_active_context = NULL; + } } static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 45dceb672e20..4b7b92a7bcf7 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -255,8 +255,8 @@ static int imx_drm_bind(struct device *dev) * this value would be used to check framebuffer size limitation * at drm_mode_addfb(). */ - drm->mode_config.min_width = 64; - drm->mode_config.min_height = 64; + drm->mode_config.min_width = 1; + drm->mode_config.min_height = 1; drm->mode_config.max_width = 4096; drm->mode_config.max_height = 4096; drm->mode_config.funcs = &imx_drm_mode_config_funcs; diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 8f8aa4a63122..4826bb781723 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -98,6 +98,8 @@ /* TVE_TST_MODE_REG */ #define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0) +#define IMX_TVE_DAC_VOLTAGE 2750000 + enum { TVE_MODE_TVOUT, TVE_MODE_VGA, @@ -616,9 +618,8 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) tve->dac_reg = devm_regulator_get(dev, "dac"); if (!IS_ERR(tve->dac_reg)) { - ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000); - if (ret) - return ret; + if (regulator_get_voltage(tve->dac_reg) != IMX_TVE_DAC_VOLTAGE) + dev_warn(dev, "dac voltage is not %d uV\n", IMX_TVE_DAC_VOLTAGE); ret = regulator_enable(tve->dac_reg); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 7f78da695dff..5b8e23d051f2 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -72,3 +72,10 @@ config DRM_MSM_DSI_28NM_8960_PHY help Choose this option if the 28nm DSI PHY 8960 variant is used on the platform. + +config DRM_MSM_DSI_14NM_PHY + bool "Enable DSI 14nm PHY driver in MSM DRM (used by MSM8996/APQ8096)" + depends on DRM_MSM_DSI + default y + help + Choose this option if DSI PHY on 8996 is used on the platform. 
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 028c24df2291..39055362da95 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -76,11 +76,13 @@ msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o +msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y) msm-y += dsi/pll/dsi_pll.o msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o +msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o endif obj-$(CONFIG_DRM_MSM) += msm.o diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index b8647198c11c..4414cf73735d 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -12,6 +12,7 @@ */ #include "msm_gem.h" +#include "msm_mmu.h" #include "a5xx_gpu.h" extern bool hang_debug; @@ -327,7 +328,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) /* Enable RBBM error reporting bits */ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001); - if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { + if (adreno_gpu->info->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) { /* * Mask out the activity signals from RB1-3 to avoid false * positives @@ -381,7 +382,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22)); - if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) + if (adreno_gpu->info->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI) gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100); @@ -573,6 +574,19 @@ static bool a5xx_idle(struct msm_gpu *gpu) return true; } +static int a5xx_fault_handler(void *arg, unsigned long iova, int flags) +{ + struct msm_gpu *gpu = arg; + pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n", + iova, flags, + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(4)), + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(5)), + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(6)), + gpu_read(gpu, REG_A5XX_CP_SCRATCH_REG(7))); + + return -EFAULT; +} + static void a5xx_cp_err_irq(struct msm_gpu *gpu) { u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS); @@ -884,5 +898,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } + if (gpu->aspace) + msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu, a5xx_fault_handler); + return gpu; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 893eb2b2531b..ece39b16a864 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -75,12 +75,14 @@ static const struct adreno_info gpulist[] = { .gmem = (SZ_1M + SZ_512K), .init = a4xx_gpu_init, }, { - .rev = ADRENO_REV(5, 3, 0, ANY_ID), + .rev = ADRENO_REV(5, 3, 0, 2), .revn = 530, .name = "A530", .pm4fw = "a530_pm4.fw", .pfpfw = "a530_pfp.fw", .gmem = SZ_1M, + .quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI | + ADRENO_QUIRK_FAULT_DETECT_MASK, .init = a5xx_gpu_init, .gpmufw = "a530v3_gpmu.fw2", }, @@ -181,22 +183,51 @@ static void set_gpu_pdev(struct drm_device *dev, priv->gpu_pdev = pdev; } -static const struct { - const char *str; - uint32_t flag; -} quirks[] = { - { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI }, - { 
"qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK }, -}; +static int find_chipid(struct device *dev, u32 *chipid) +{ + struct device_node *node = dev->of_node; + const char *compat; + int ret; + + /* first search the compat strings for qcom,adreno-XYZ.W: */ + ret = of_property_read_string_index(node, "compatible", 0, &compat); + if (ret == 0) { + unsigned rev, patch; + + if (sscanf(compat, "qcom,adreno-%u.%u", &rev, &patch) == 2) { + *chipid = 0; + *chipid |= (rev / 100) << 24; /* core */ + rev %= 100; + *chipid |= (rev / 10) << 16; /* major */ + rev %= 10; + *chipid |= rev << 8; /* minor */ + *chipid |= patch; + + return 0; + } + } + + /* and if that fails, fall back to legacy "qcom,chipid" property: */ + ret = of_property_read_u32(node, "qcom,chipid", chipid); + if (ret) + return ret; + + dev_warn(dev, "Using legacy qcom,chipid binding!\n"); + dev_warn(dev, "Use compatible qcom,adreno-%u%u%u.%u instead.\n", + (*chipid >> 24) & 0xff, (*chipid >> 16) & 0xff, + (*chipid >> 8) & 0xff, *chipid & 0xff); + + return 0; +} static int adreno_bind(struct device *dev, struct device *master, void *data) { static struct adreno_platform_config config = {}; struct device_node *child, *node = dev->of_node; u32 val; - int ret, i; + int ret; - ret = of_property_read_u32(node, "qcom,chipid", &val); + ret = find_chipid(dev, &val); if (ret) { dev_err(dev, "could not find chipid: %d\n", ret); return ret; @@ -224,14 +255,12 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) } if (!config.fast_rate) { - dev_err(dev, "could not find clk rates\n"); - return -ENXIO; + dev_warn(dev, "could not find clk rates\n"); + /* This is a safe low speed for all devices: */ + config.fast_rate = 200000000; + config.slow_rate = 27000000; } - for (i = 0; i < ARRAY_SIZE(quirks); i++) - if (of_property_read_bool(node, quirks[i].str)) - config.quirks |= quirks[i].flag; - dev->platform_data = &config; set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev)); return 0; @@ -260,6 +289,7 @@ static int adreno_remove(struct platform_device *pdev) } static const struct of_device_id dt_match[] = { + { .compatible = "qcom,adreno" }, { .compatible = "qcom,adreno-3xx" }, /* for backwards compat w/ downstream kgsl DT files: */ { .compatible = "qcom,kgsl-3d0" }, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 686a580c711a..c9bd1e6225f4 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -352,7 +352,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, adreno_gpu->gmem = adreno_gpu->info->gmem; adreno_gpu->revn = adreno_gpu->info->revn; adreno_gpu->rev = config->rev; - adreno_gpu->quirks = config->quirks; gpu->fast_rate = config->fast_rate; gpu->slow_rate = config->slow_rate; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index e8d55b0306ed..42e444a67630 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -75,6 +75,7 @@ struct adreno_info { const char *pm4fw, *pfpfw; const char *gpmufw; uint32_t gmem; + enum adreno_quirks quirks; struct msm_gpu *(*init)(struct drm_device *dev); }; @@ -116,8 +117,6 @@ struct adreno_gpu { * code (a3xx_gpu.c) and stored in this common location. 
*/ const unsigned int *reg_offsets; - - uint32_t quirks; }; #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) @@ -128,7 +127,6 @@ struct adreno_platform_config { #ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING struct msm_bus_scale_pdata *bus_scale_table; #endif - uint32_t quirks; }; #define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000) diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index ec572f8389ed..311c1c1e7d6c 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -18,9 +18,7 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi) if (!msm_dsi || !msm_dsi_device_connected(msm_dsi)) return NULL; - return (msm_dsi->device_flags & MIPI_DSI_MODE_VIDEO) ? - msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID] : - msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID]; + return msm_dsi->encoder; } static int dsi_get_phy(struct msm_dsi *msm_dsi) @@ -187,14 +185,13 @@ void __exit msm_dsi_unregister(void) } int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, - struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) + struct drm_encoder *encoder) { struct msm_drm_private *priv = dev->dev_private; struct drm_bridge *ext_bridge; - int ret, i; + int ret; - if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] || - !encoders[MSM_DSI_CMD_ENCODER_ID])) + if (WARN_ON(!encoder)) return -EINVAL; msm_dsi->dev = dev; @@ -205,6 +202,8 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, goto fail; } + msm_dsi->encoder = encoder; + msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id); if (IS_ERR(msm_dsi->bridge)) { ret = PTR_ERR(msm_dsi->bridge); @@ -213,11 +212,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, goto fail; } - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { - encoders[i]->bridge = msm_dsi->bridge; - msm_dsi->encoders[i] = encoders[i]; - } - /* * check if the dsi encoder output is connected to a panel or an * external bridge. 
We create a connector only if we're connected to a diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 03f115f532c2..32369975d155 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -27,14 +27,24 @@ #define DSI_1 1 #define DSI_MAX 2 +struct msm_dsi_phy_shared_timings; +struct msm_dsi_phy_clk_request; + enum msm_dsi_phy_type { MSM_DSI_PHY_28NM_HPM, MSM_DSI_PHY_28NM_LP, MSM_DSI_PHY_20NM, MSM_DSI_PHY_28NM_8960, + MSM_DSI_PHY_14NM, MSM_DSI_PHY_MAX }; +enum msm_dsi_phy_usecase { + MSM_DSI_PHY_STANDALONE, + MSM_DSI_PHY_MASTER, + MSM_DSI_PHY_SLAVE, +}; + #define DSI_DEV_REGULATOR_MAX 8 #define DSI_BUS_CLK_MAX 4 @@ -73,8 +83,8 @@ struct msm_dsi { struct device *phy_dev; bool phy_enabled; - /* the encoders we are hooked to (outside of dsi block) */ - struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]; + /* the encoder we are hooked to (outside of dsi block) */ + struct drm_encoder *encoder; int id; }; @@ -84,12 +94,9 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id); void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge); struct drm_connector *msm_dsi_manager_connector_init(u8 id); struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id); -int msm_dsi_manager_phy_enable(int id, - const unsigned long bit_rate, const unsigned long esc_rate, - u32 *clk_pre, u32 *clk_post); -void msm_dsi_manager_phy_disable(int id); int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg); bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len); +void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags); int msm_dsi_manager_register(struct msm_dsi *msm_dsi); void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi); @@ -111,6 +118,8 @@ int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll, struct clk **byte_clk_provider, struct clk **pixel_clk_provider); void msm_dsi_pll_save_state(struct msm_dsi_pll *pll); int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll); +int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc); #else static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, enum msm_dsi_phy_type type, int id) { @@ -131,6 +140,11 @@ static inline int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) { return 0; } +static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + return -ENODEV; +} #endif /* dsi host */ @@ -146,7 +160,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base, u32 len); int msm_dsi_host_enable(struct mipi_dsi_host *host); int msm_dsi_host_disable(struct mipi_dsi_host *host); -int msm_dsi_host_power_on(struct mipi_dsi_host *host); +int msm_dsi_host_power_on(struct mipi_dsi_host *host, + struct msm_dsi_phy_shared_timings *phy_shared_timings); int msm_dsi_host_power_off(struct mipi_dsi_host *host); int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_display_mode *mode); @@ -157,6 +172,9 @@ int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer); void msm_dsi_host_unregister(struct mipi_dsi_host *host); int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host, struct msm_dsi_pll *src_pll); +void msm_dsi_host_reset_phy(struct mipi_dsi_host *host); +void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, + struct msm_dsi_phy_clk_request *clk_req); void msm_dsi_host_destroy(struct mipi_dsi_host *host); int msm_dsi_host_modeset_init(struct mipi_dsi_host *host, struct drm_device *dev); @@ -164,14 +182,27 @@ int 
msm_dsi_host_init(struct msm_dsi *msm_dsi); /* dsi phy */ struct msm_dsi_phy; +struct msm_dsi_phy_shared_timings { + u32 clk_post; + u32 clk_pre; + bool clk_pre_inc_by_2; +}; + +struct msm_dsi_phy_clk_request { + unsigned long bitclk_rate; + unsigned long escclk_rate; +}; + void msm_dsi_phy_driver_register(void); void msm_dsi_phy_driver_unregister(void); int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate); + struct msm_dsi_phy_clk_request *clk_req); void msm_dsi_phy_disable(struct msm_dsi_phy *phy); -void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, - u32 *clk_pre, u32 *clk_post); +void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, + struct msm_dsi_phy_shared_timings *shared_timing); struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy); +void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, + enum msm_dsi_phy_usecase uc); #endif /* __DSI_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h index 39dff7d5e89b..b3d70ea42891 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.xml.h +++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h @@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) - -Copyright (C) 2013-2015 by the following authors: +- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-01-11 05:19:19) +- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) + +Copyright (C) 2013-2017 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) @@ -1304,5 +1295,257 @@ static inline uint32_t DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) #define REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018 +#define REG_DSI_14nm_PHY_CMN_REVISION_ID0 0x00000000 + +#define REG_DSI_14nm_PHY_CMN_REVISION_ID1 0x00000004 + +#define REG_DSI_14nm_PHY_CMN_REVISION_ID2 0x00000008 + +#define REG_DSI_14nm_PHY_CMN_REVISION_ID3 0x0000000c + +#define REG_DSI_14nm_PHY_CMN_CLK_CFG0 0x00000010 +#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK 0x000000f0 +#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT 4 +static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(uint32_t val) +{ + return 
((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0__MASK; +} +#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK 0x000000f0 +#define DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT 4 +static inline uint32_t DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__SHIFT) & DSI_14nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4__MASK; +} + +#define REG_DSI_14nm_PHY_CMN_CLK_CFG1 0x00000014 +#define DSI_14nm_PHY_CMN_CLK_CFG1_DSICLK_SEL 0x00000001 + +#define REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL 0x00000018 +#define DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL 0x00000004 + +#define REG_DSI_14nm_PHY_CMN_CTRL_0 0x0000001c + +#define REG_DSI_14nm_PHY_CMN_CTRL_1 0x00000020 + +#define REG_DSI_14nm_PHY_CMN_HW_TRIGGER 0x00000024 + +#define REG_DSI_14nm_PHY_CMN_SW_CFG0 0x00000028 + +#define REG_DSI_14nm_PHY_CMN_SW_CFG1 0x0000002c + +#define REG_DSI_14nm_PHY_CMN_SW_CFG2 0x00000030 + +#define REG_DSI_14nm_PHY_CMN_HW_CFG0 0x00000034 + +#define REG_DSI_14nm_PHY_CMN_HW_CFG1 0x00000038 + +#define REG_DSI_14nm_PHY_CMN_HW_CFG2 0x0000003c + +#define REG_DSI_14nm_PHY_CMN_HW_CFG3 0x00000040 + +#define REG_DSI_14nm_PHY_CMN_HW_CFG4 0x00000044 + +#define REG_DSI_14nm_PHY_CMN_PLL_CNTRL 0x00000048 +#define DSI_14nm_PHY_CMN_PLL_CNTRL_PLL_START 0x00000001 + +#define REG_DSI_14nm_PHY_CMN_LDO_CNTRL 0x0000004c +#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK 0x0000003f +#define DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__SHIFT) & DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; } +#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK 0x000000c0 +#define DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT 6 +static inline uint32_t DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__SHIFT) & DSI_14nm_PHY_LN_CFG0_PREPARE_DLY__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; } +#define DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN 0x00000001 + +static inline uint32_t REG_DSI_14nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_TEST_STR(uint32_t i0) { return 0x00000014 + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(uint32_t i0) { return 0x00000018 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(uint32_t i0) { return 0x0000001c + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__SHIFT) & 
DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(uint32_t i0) { return 0x00000020 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(uint32_t i0) { return 0x00000024 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(uint32_t i0) { return 0x00000028 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(uint32_t i0) { return 0x0000002c + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK 0x00000007 +#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO__MASK; +} +#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK 0x00000070 +#define DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT 4 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(uint32_t i0) { return 0x00000030 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK 0x00000007 +#define DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(uint32_t i0) { return 0x00000034 + 0x80*i0; } +#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK 0x000000ff +#define DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT 0 +static inline uint32_t DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(uint32_t val) +{ + return ((val) << DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__SHIFT) & DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD__MASK; +} + +static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(uint32_t i0) { return 0x00000038 + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(uint32_t i0) { return 0x0000003c + 0x80*i0; } + +static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00000064 + 0x80*i0; } + +#define REG_DSI_14nm_PHY_PLL_IE_TRIM 0x00000000 + +#define REG_DSI_14nm_PHY_PLL_IP_TRIM 0x00000004 + +#define REG_DSI_14nm_PHY_PLL_IPTAT_TRIM 0x00000010 + +#define REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN 0x0000001c + +#define REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET 0x00000028 + +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL 0x0000002c 
+ +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2 0x00000030 + +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL3 0x00000034 + +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL4 0x00000038 + +#define REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5 0x0000003c + +#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1 0x00000040 + +#define REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2 0x00000044 + +#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT1 0x00000048 + +#define REG_DSI_14nm_PHY_PLL_KVCO_COUNT2 0x0000004c + +#define REG_DSI_14nm_PHY_PLL_VREF_CFG1 0x0000005c + +#define REG_DSI_14nm_PHY_PLL_KVCO_CODE 0x00000058 + +#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1 0x0000006c + +#define REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2 0x00000070 + +#define REG_DSI_14nm_PHY_PLL_VCO_COUNT1 0x00000074 + +#define REG_DSI_14nm_PHY_PLL_VCO_COUNT2 0x00000078 + +#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1 0x0000007c + +#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2 0x00000080 + +#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3 0x00000084 + +#define REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN 0x00000088 + +#define REG_DSI_14nm_PHY_PLL_PLL_VCO_TUNE 0x0000008c + +#define REG_DSI_14nm_PHY_PLL_DEC_START 0x00000090 + +#define REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER 0x00000094 + +#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1 0x00000098 + +#define REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2 0x0000009c + +#define REG_DSI_14nm_PHY_PLL_SSC_PER1 0x000000a0 + +#define REG_DSI_14nm_PHY_PLL_SSC_PER2 0x000000a4 + +#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1 0x000000a8 + +#define REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2 0x000000ac + +#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1 0x000000b4 + +#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2 0x000000b8 + +#define REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3 0x000000bc + +#define REG_DSI_14nm_PHY_PLL_TXCLK_EN 0x000000c0 + +#define REG_DSI_14nm_PHY_PLL_PLL_CRCTRL 0x000000c4 + +#define REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS 0x000000cc + +#define REG_DSI_14nm_PHY_PLL_PLL_MISC1 0x000000e8 + +#define REG_DSI_14nm_PHY_PLL_CP_SET_CUR 0x000000f0 + +#define REG_DSI_14nm_PHY_PLL_PLL_ICPMSET 0x000000f4 + +#define REG_DSI_14nm_PHY_PLL_PLL_ICPCSET 0x000000f8 + +#define REG_DSI_14nm_PHY_PLL_PLL_ICP_SET 0x000000fc + +#define REG_DSI_14nm_PHY_PLL_PLL_LPF1 0x00000100 + +#define REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV 0x00000104 + +#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108 + #endif /* DSI_XML */ diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c index 63436d8ee470..a5d75c9b3a73 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c @@ -94,6 +94,30 @@ static const struct msm_dsi_config msm8994_dsi_cfg = { .num_dsi = 2, }; +/* + * TODO: core_mmss_clk fails to enable for some reason, but things work fine + * without it too. 
Figure out why it doesn't enable and uncomment below + */ +static const char * const dsi_8996_bus_clk_names[] = { + "mdp_core_clk", "iface_clk", "bus_clk", /* "core_mmss_clk", */ +}; + +static const struct msm_dsi_config msm8996_dsi_cfg = { + .io_offset = DSI_6G_REG_SHIFT, + .reg_cfg = { + .num = 2, + .regs = { + {"vdda", 18160, 1 }, /* 1.25 V */ + {"vcca", 17000, 32 }, /* 0.925 V */ + {"vddio", 100000, 100 },/* 1.8 V */ + }, + }, + .bus_clk_names = dsi_8996_bus_clk_names, + .num_bus_clks = ARRAY_SIZE(dsi_8996_bus_clk_names), + .io_start = { 0x994000, 0x996000 }, + .num_dsi = 2, +}; + static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0, @@ -106,6 +130,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = { &msm8974_apq8084_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg}, {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg}, + {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg}, }; const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor) diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h index eeacc3232494..00a5da2663c6 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h +++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h @@ -24,6 +24,7 @@ #define MSM_DSI_6G_VER_MINOR_V1_2 0x10020000 #define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000 #define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001 +#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001 #define MSM_DSI_V2_VER_MINOR_8064 0x0 diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index c96e270361b0..4f79b109173d 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -691,17 +691,6 @@ static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host) return 0; } -static void dsi_phy_sw_reset(struct msm_dsi_host *msm_host) -{ - DBG(""); - dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET); - /* Make sure fully reset */ - wmb(); - udelay(1000); - dsi_write(msm_host, REG_DSI_PHY_RESET, 0); - udelay(100); -} - static void dsi_intr_ctrl(struct msm_dsi_host *msm_host, u32 mask, int enable) { u32 intr; @@ -756,7 +745,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt( } static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, - u32 clk_pre, u32 clk_post) + struct msm_dsi_phy_shared_timings *phy_shared_timings) { u32 flags = msm_host->mode_flags; enum mipi_dsi_pixel_format mipi_fmt = msm_host->format; @@ -819,10 +808,16 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable, data |= DSI_TRIG_CTRL_BLOCK_DMA_WITHIN_FRAME; dsi_write(msm_host, REG_DSI_TRIG_CTRL, data); - data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(clk_post) | - DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(clk_pre); + data = DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(phy_shared_timings->clk_post) | + DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(phy_shared_timings->clk_pre); dsi_write(msm_host, REG_DSI_CLKOUT_TIMING_CTRL, data); + if ((cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) && + (cfg_hnd->minor > MSM_DSI_6G_VER_MINOR_V1_0) && + phy_shared_timings->clk_pre_inc_by_2) + dsi_write(msm_host, REG_DSI_T_CLK_PRE_EXTEND, + DSI_T_CLK_PRE_EXTEND_INC_BY_2_BYTECLK); + data = 0; if (!(flags & MIPI_DSI_MODE_EOT_PACKET)) data |= DSI_EOT_PACKET_CTRL_TX_EOT_APPEND; @@ -1482,6 +1477,8 @@ static int dsi_host_attach(struct mipi_dsi_host *host, msm_host->format = dsi->format; msm_host->mode_flags = dsi->mode_flags; + 
msm_dsi_manager_attach_dsi_device(msm_host->id, dsi->mode_flags); + /* Some gpios defined in panel DT need to be controlled by host */ ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev); if (ret) @@ -1557,8 +1554,9 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, prop = of_find_property(ep, "data-lanes", &len); if (!prop) { - dev_dbg(dev, "failed to find data lane mapping\n"); - return -EINVAL; + dev_dbg(dev, + "failed to find data lane mapping, using default\n"); + return 0; } num_lanes = len / sizeof(u32); @@ -1615,7 +1613,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) struct device *dev = &msm_host->pdev->dev; struct device_node *np = dev->of_node; struct device_node *endpoint, *device_node; - int ret; + int ret = 0; /* * Get the endpoint of the output port of the DSI host. In our case, @@ -1639,8 +1637,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host) /* Get panel node from the output port's endpoint data */ device_node = of_graph_get_remote_port_parent(endpoint); if (!device_node) { - dev_err(dev, "%s: no valid device\n", __func__); - ret = -ENODEV; + dev_dbg(dev, "%s: no valid device\n", __func__); goto err; } @@ -2119,6 +2116,28 @@ exit: return ret; } +void msm_dsi_host_reset_phy(struct mipi_dsi_host *host) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + DBG(""); + dsi_write(msm_host, REG_DSI_PHY_RESET, DSI_PHY_RESET_RESET); + /* Make sure fully reset */ + wmb(); + udelay(1000); + dsi_write(msm_host, REG_DSI_PHY_RESET, 0); + udelay(100); +} + +void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host, + struct msm_dsi_phy_clk_request *clk_req) +{ + struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + + clk_req->bitclk_rate = msm_host->byte_clk_rate * 8; + clk_req->escclk_rate = msm_host->esc_clk_rate; +} + int msm_dsi_host_enable(struct mipi_dsi_host *host) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); @@ -2166,10 +2185,10 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable) SFPB_GPREG_MASTER_PORT_EN(en)); } -int msm_dsi_host_power_on(struct mipi_dsi_host *host) +int msm_dsi_host_power_on(struct mipi_dsi_host *host, + struct msm_dsi_phy_shared_timings *phy_shared_timings) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); - u32 clk_pre = 0, clk_post = 0; int ret = 0; mutex_lock(&msm_host->dev_mutex); @@ -2180,12 +2199,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) msm_dsi_sfpb_config(msm_host, true); - ret = dsi_calc_clk_rate(msm_host); - if (ret) { - pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); - goto unlock_ret; - } - ret = dsi_host_regulator_enable(msm_host); if (ret) { pr_err("%s:Failed to enable vregs.ret=%d\n", @@ -2193,23 +2206,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) goto unlock_ret; } - ret = dsi_bus_clk_enable(msm_host); - if (ret) { - pr_err("%s: failed to enable bus clocks, %d\n", __func__, ret); - goto fail_disable_reg; - } - - dsi_phy_sw_reset(msm_host); - ret = msm_dsi_manager_phy_enable(msm_host->id, - msm_host->byte_clk_rate * 8, - msm_host->esc_clk_rate, - &clk_pre, &clk_post); - dsi_bus_clk_disable(msm_host); - if (ret) { - pr_err("%s: failed to enable phy, %d\n", __func__, ret); - goto fail_disable_reg; - } - ret = dsi_clk_ctrl(msm_host, 1); if (ret) { pr_err("%s: failed to enable clocks. 
ret=%d\n", __func__, ret); @@ -2225,7 +2221,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host) dsi_timing_setup(msm_host); dsi_sw_reset(msm_host); - dsi_ctrl_config(msm_host, true, clk_pre, clk_post); + dsi_ctrl_config(msm_host, true, phy_shared_timings); if (msm_host->disp_en_gpio) gpiod_set_value(msm_host->disp_en_gpio, 1); @@ -2254,15 +2250,13 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host) goto unlock_ret; } - dsi_ctrl_config(msm_host, false, 0, 0); + dsi_ctrl_config(msm_host, false, NULL); if (msm_host->disp_en_gpio) gpiod_set_value(msm_host->disp_en_gpio, 0); pinctrl_pm_select_sleep_state(&msm_host->pdev->dev); - msm_dsi_manager_phy_disable(msm_host->id); - dsi_clk_ctrl(msm_host, 0); dsi_host_regulator_disable(msm_host); @@ -2282,6 +2276,7 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, struct drm_display_mode *mode) { struct msm_dsi_host *msm_host = to_msm_dsi_host(host); + int ret; if (msm_host->mode) { drm_mode_destroy(msm_host->dev, msm_host->mode); @@ -2294,6 +2289,12 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host, return -ENOMEM; } + ret = dsi_calc_clk_rate(msm_host); + if (ret) { + pr_err("%s: unable to calc clk rate, %d\n", __func__, ret); + return ret; + } + return 0; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 2bd8dad76105..921270ea6059 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -72,11 +72,12 @@ static int dsi_mgr_parse_dual_dsi(struct device_node *np, int id) return 0; } -static int dsi_mgr_host_register(int id) +static int dsi_mgr_setup_components(int id) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id); struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *clk_slave_dsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); struct msm_dsi_pll *src_pll; int ret; @@ -85,15 +86,16 @@ static int dsi_mgr_host_register(int id) if (ret) return ret; + msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); src_pll = msm_dsi_phy_get_pll(msm_dsi->phy); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); } else if (!other_dsi) { ret = 0; } else { - struct msm_dsi *mdsi = IS_MASTER_DSI_LINK(id) ? - msm_dsi : other_dsi; - struct msm_dsi *sdsi = IS_MASTER_DSI_LINK(id) ? - other_dsi : msm_dsi; + struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? + msm_dsi : other_dsi; + struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? + other_dsi : msm_dsi; /* Register slave host first, so that slave DSI device * has a chance to probe, and do not block the master * DSI device's probe. @@ -101,14 +103,18 @@ static int dsi_mgr_host_register(int id) * because only master DSI device adds the panel to global * panel list. The panel's device is the master DSI device. */ - ret = msm_dsi_host_register(sdsi->host, false); + ret = msm_dsi_host_register(slave_link_dsi->host, false); if (ret) return ret; - ret = msm_dsi_host_register(mdsi->host, true); + ret = msm_dsi_host_register(master_link_dsi->host, true); if (ret) return ret; /* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. 
*/ + msm_dsi_phy_set_usecase(clk_master_dsi->phy, + MSM_DSI_PHY_MASTER); + msm_dsi_phy_set_usecase(clk_slave_dsi->phy, + MSM_DSI_PHY_SLAVE); src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy); ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll); if (ret) @@ -119,6 +125,84 @@ static int dsi_mgr_host_register(int id) return ret; } +static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id, + struct msm_dsi_phy_shared_timings *shared_timings) +{ + struct msm_dsi_phy_clk_request clk_req; + int ret; + + msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req); + + ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req); + msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings); + + return ret; +} + +static int +dsi_mgr_phy_enable(int id, + struct msm_dsi_phy_shared_timings shared_timings[DSI_MAX]) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); + int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; + int ret; + + /* In case of dual DSI, some registers in PHY1 have been programmed + * during PLL0 clock's set_rate. The PHY1 reset called by host1 here + * will silently reset those PHY1 registers. Therefore we need to reset + * and enable both PHYs before any PLL clock operation. + */ + if (IS_DUAL_DSI() && mdsi && sdsi) { + if (!mdsi->phy_enabled && !sdsi->phy_enabled) { + msm_dsi_host_reset_phy(mdsi->host); + msm_dsi_host_reset_phy(sdsi->host); + + ret = enable_phy(mdsi, src_pll_id, + &shared_timings[DSI_CLOCK_MASTER]); + if (ret) + return ret; + ret = enable_phy(sdsi, src_pll_id, + &shared_timings[DSI_CLOCK_SLAVE]); + if (ret) { + msm_dsi_phy_disable(mdsi->phy); + return ret; + } + } + } else { + msm_dsi_host_reset_phy(mdsi->host); + ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]); + if (ret) + return ret; + } + + msm_dsi->phy_enabled = true; + + return 0; +} + +static void dsi_mgr_phy_disable(int id) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); + struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); + + /* disable DSI phy + * In dual-dsi configuration, the phy should be disabled for the + * first controller only when the second controller is disabled. 
+ */ + msm_dsi->phy_enabled = false; + if (IS_DUAL_DSI() && mdsi && sdsi) { + if (!mdsi->phy_enabled && !sdsi->phy_enabled) { + msm_dsi_phy_disable(sdsi->phy); + msm_dsi_phy_disable(mdsi->phy); + } + } else { + msm_dsi_phy_disable(msm_dsi->phy); + } +} + struct dsi_connector { struct drm_connector base; int id; @@ -168,6 +252,16 @@ static enum drm_connector_status dsi_mgr_connector_detect( msm_dsi->panel = msm_dsi_host_get_panel( other_dsi->host, NULL); + + if (msm_dsi->panel && kms->funcs->set_encoder_mode) { + bool cmd_mode = !(msm_dsi->device_flags & + MIPI_DSI_MODE_VIDEO); + struct drm_encoder *encoder = + msm_dsi_get_encoder(msm_dsi); + + kms->funcs->set_encoder_mode(kms, encoder, cmd_mode); + } + if (msm_dsi->panel && IS_DUAL_DSI()) drm_object_attach_property(&connector->base, connector->dev->mode_config.tile_property, 0); @@ -344,22 +438,31 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge) struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1); struct mipi_dsi_host *host = msm_dsi->host; struct drm_panel *panel = msm_dsi->panel; + struct msm_dsi_phy_shared_timings phy_shared_timings[DSI_MAX]; bool is_dual_dsi = IS_DUAL_DSI(); int ret; DBG("id=%d", id); - if (!msm_dsi_device_connected(msm_dsi) || - (is_dual_dsi && (DSI_1 == id))) + if (!msm_dsi_device_connected(msm_dsi)) return; - ret = msm_dsi_host_power_on(host); + ret = dsi_mgr_phy_enable(id, phy_shared_timings); + if (ret) + goto phy_en_fail; + + /* Do nothing with the host if it is DSI 1 in case of dual DSI */ + if (is_dual_dsi && (DSI_1 == id)) + return; + + ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]); if (ret) { pr_err("%s: power on host %d failed, %d\n", __func__, id, ret); goto host_on_fail; } if (is_dual_dsi && msm_dsi1) { - ret = msm_dsi_host_power_on(msm_dsi1->host); + ret = msm_dsi_host_power_on(msm_dsi1->host, + &phy_shared_timings[DSI_1]); if (ret) { pr_err("%s: power on host1 failed, %d\n", __func__, ret); @@ -418,6 +521,8 @@ panel_prep_fail: host1_on_fail: msm_dsi_host_power_off(host); host_on_fail: + dsi_mgr_phy_disable(id); +phy_en_fail: return; } @@ -443,10 +548,17 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) DBG("id=%d", id); - if (!msm_dsi_device_connected(msm_dsi) || - (is_dual_dsi && (DSI_1 == id))) + if (!msm_dsi_device_connected(msm_dsi)) return; + /* + * Do nothing with the host if it is DSI 1 in case of dual DSI. + * It is safe to call dsi_mgr_phy_disable() here because a single PHY + * won't be diabled until both PHYs request disable. 
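
/*
 * dsi_mgr_phy_disable() above implements "last one out" gating: each
 * controller clears its own phy_enabled flag, and the hardware is only
 * touched once both master and slave flags are clear. Standalone model
 * of that gating (_sk names are illustrative):
 */
#include <stdbool.h>
#include <stdio.h>

struct link_sk { const char *name; bool phy_enabled; };

static void mgr_phy_disable_sk(struct link_sk *self, struct link_sk *m,
			       struct link_sk *s, bool dual)
{
	self->phy_enabled = false;
	if (dual) {
		if (!m->phy_enabled && !s->phy_enabled)
			printf("both links down: disable %s then %s\n",
			       s->name, m->name);
	} else {
		printf("standalone: disable %s\n", self->name);
	}
}

int main(void)
{
	struct link_sk m = { "PHY0", true }, s = { "PHY1", true };

	mgr_phy_disable_sk(&m, &m, &s, true); /* PHYs stay powered */
	mgr_phy_disable_sk(&s, &m, &s, true); /* now both are disabled */
	return 0;
}
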
+ */ + if (is_dual_dsi && (DSI_1 == id)) + goto disable_phy; + if (panel) { ret = drm_panel_disable(panel); if (ret) @@ -481,6 +593,9 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge) pr_err("%s: host1 power off failed, %d\n", __func__, ret); } + +disable_phy: + dsi_mgr_phy_disable(id); } static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge, @@ -540,7 +655,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct drm_connector *connector = NULL; struct dsi_connector *dsi_connector; - int ret, i; + int ret; dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL); if (!dsi_connector) @@ -566,9 +681,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) - drm_mode_connector_attach_encoder(connector, - msm_dsi->encoders[i]); + drm_mode_connector_attach_encoder(connector, msm_dsi->encoder); return connector; } @@ -591,13 +704,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id) dsi_bridge->id = id; - /* - * HACK: we may not know the external DSI bridge device's mode - * flags here. We'll get to know them only when the device - * attaches to the dsi host. For now, assume the bridge supports - * DSI video mode - */ - encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; + encoder = msm_dsi->encoder; bridge = &dsi_bridge->base; bridge->funcs = &dsi_mgr_bridge_funcs; @@ -628,13 +735,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) ext_bridge = msm_dsi->external_bridge = msm_dsi_host_get_bridge(msm_dsi->host); - /* - * HACK: we may not know the external DSI bridge device's mode - * flags here. We'll get to know them only when the device - * attaches to the dsi host. For now, assume the bridge supports - * DSI video mode - */ - encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; + encoder = msm_dsi->encoder; /* link the internal dsi bridge to the external bridge */ drm_bridge_attach(encoder, ext_bridge, int_bridge); @@ -662,68 +763,6 @@ void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge) { } -int msm_dsi_manager_phy_enable(int id, - const unsigned long bit_rate, const unsigned long esc_rate, - u32 *clk_pre, u32 *clk_post) -{ - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct msm_dsi_phy *phy = msm_dsi->phy; - int src_pll_id = IS_DUAL_DSI() ? DSI_CLOCK_MASTER : id; - struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); - int ret; - - ret = msm_dsi_phy_enable(phy, src_pll_id, bit_rate, esc_rate); - if (ret) - return ret; - - /* - * Reset DSI PHY silently changes its PLL registers to reset status, - * which will confuse clock driver and result in wrong output rate of - * link clocks. Restore PLL status if its PLL is being used as clock - * source. 
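
/*
 * dsi_mgr_bridge_pre_enable() above gains a PHY stage ahead of host
 * power-on, and its error ladder unwinds in reverse order through the
 * new phy_en_fail label. A compact standalone model of that goto ladder,
 * with stub functions standing in for the real enable steps:
 */
#include <stdio.h>

static int phy_enable_sk(void) { return 0; }
static int host_power_on_sk(int fail) { return fail ? -1 : 0; }

static int pre_enable_sk(int make_host_fail)
{
	int ret;

	ret = phy_enable_sk();
	if (ret)
		goto phy_en_fail;

	ret = host_power_on_sk(make_host_fail);
	if (ret)
		goto host_on_fail;

	return 0;

host_on_fail:
	printf("unwinding: disable PHY again\n");
phy_en_fail:
	return ret;
}

int main(void)
{
	printf("good path: %d\n", pre_enable_sk(0));
	printf("host failure path: %d\n", pre_enable_sk(1));
	return 0;
}
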
- */ - if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) { - ret = msm_dsi_pll_restore_state(pll); - if (ret) { - pr_err("%s: failed to restore pll state\n", __func__); - msm_dsi_phy_disable(phy); - return ret; - } - } - - msm_dsi->phy_enabled = true; - msm_dsi_phy_get_clk_pre_post(phy, clk_pre, clk_post); - - return 0; -} - -void msm_dsi_manager_phy_disable(int id) -{ - struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - struct msm_dsi *mdsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER); - struct msm_dsi *sdsi = dsi_mgr_get_dsi(DSI_CLOCK_SLAVE); - struct msm_dsi_phy *phy = msm_dsi->phy; - struct msm_dsi_pll *pll = msm_dsi_phy_get_pll(msm_dsi->phy); - - /* Save PLL status if it is a clock source */ - if (!IS_DUAL_DSI() || (id == DSI_CLOCK_MASTER)) - msm_dsi_pll_save_state(pll); - - /* disable DSI phy - * In dual-dsi configuration, the phy should be disabled for the - * first controller only when the second controller is disabled. - */ - msm_dsi->phy_enabled = false; - if (IS_DUAL_DSI() && mdsi && sdsi) { - if (!mdsi->phy_enabled && !sdsi->phy_enabled) { - msm_dsi_phy_disable(sdsi->phy); - msm_dsi_phy_disable(mdsi->phy); - } - } else { - msm_dsi_phy_disable(phy); - } -} - int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg) { struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); @@ -787,6 +826,33 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len) return true; } +void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags) +{ + struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); + struct drm_device *dev = msm_dsi->dev; + struct msm_drm_private *priv; + struct msm_kms *kms; + struct drm_encoder *encoder; + + /* + * drm_device pointer is assigned to msm_dsi only in the modeset_init + * path. If mipi_dsi_attach() happens in DSI driver's probe path + * (generally the case when we're connected to a drm_panel of the type + * mipi_dsi_device), this would be NULL. In such cases, try to set the + * encoder mode in the DSI connector's detect() op. 
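
/*
 * The timing hunks above fold clk_pre into shared_timings: when the
 * computed minimum exceeds the maximum, the code interpolates over a
 * doubled range, halves the result, and sets clk_pre_inc_by_2 so the
 * host can compensate. Simplified standalone model; linear_inter() is
 * kernel-internal, and this sketch assumes it picks a point `percent`
 * of the way from tmin toward tmax:
 */
#include <stdbool.h>
#include <stdio.h>

static int linear_inter_sk(int tmax, int tmin, int percent)
{
	return tmin + (tmax - tmin) * percent / 100;
}

static int calc_clk_pre_sk(int tmax, int tmin, bool *inc_by_2)
{
	if (tmin > tmax) {
		*inc_by_2 = true;
		return linear_inter_sk(2 * tmax, tmin, 10) >> 1;
	}
	*inc_by_2 = false;
	return linear_inter_sk(tmax, tmin, 10);
}

int main(void)
{
	bool inc;
	int clk_pre = calc_clk_pre_sk(63, 80, &inc); /* tmin > tmax case */

	printf("clk_pre=%d inc_by_2=%d\n", clk_pre, inc);
	return 0;
}
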
+ */ + if (!dev) + return; + + priv = dev->dev_private; + kms = priv->kms; + encoder = msm_dsi_get_encoder(msm_dsi); + + if (encoder && kms->funcs->set_encoder_mode) + if (!(device_flags & MIPI_DSI_MODE_VIDEO)) + kms->funcs->set_encoder_mode(kms, encoder, true); +} + int msm_dsi_manager_register(struct msm_dsi *msm_dsi) { struct msm_dsi_manager *msm_dsim = &msm_dsim_glb; @@ -811,7 +877,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi) goto fail; } - ret = dsi_mgr_host_register(id); + ret = dsi_mgr_setup_components(id); if (ret) { pr_err("%s: failed to register mipi dsi host for DSI %d\n", __func__, id); diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c index f39386ed75e4..0c2eb9c9a1fc 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c @@ -54,8 +54,10 @@ static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing, } int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, - const unsigned long bit_rate, const unsigned long esc_rate) + struct msm_dsi_phy_clk_request *clk_req) { + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; s32 ui, lpx; s32 tmax, tmin; s32 pcnt0 = 10; @@ -115,8 +117,8 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, temp = ((timing->hs_exit >> 1) + 1) * 2 * ui; temp = 60 * coeff + 52 * ui - 24 * ui - temp; tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; - timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false); - + timing->shared_timings.clk_post = linear_inter(tmax, tmin, pcnt2, 0, + false); tmax = 63; temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui; temp += ((timing->clk_zero >> 1) + 1) * 2 * ui; @@ -124,17 +126,21 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1; if (tmin > tmax) { temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false); - timing->clk_pre = temp >> 1; + timing->shared_timings.clk_pre = temp >> 1; + timing->shared_timings.clk_pre_inc_by_2 = true; } else { - timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre_inc_by_2 = false; } timing->ta_go = 3; timing->ta_sure = 0; timing->ta_get = 4; - DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", - timing->clk_pre, timing->clk_post, timing->clk_zero, + DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit, timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst); @@ -142,6 +148,123 @@ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, return 0; } +int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req) +{ + const unsigned long bit_rate = clk_req->bitclk_rate; + const unsigned long esc_rate = clk_req->escclk_rate; + s32 ui, ui_x8, lpx; + s32 tmax, tmin; + s32 pcnt0 = 50; + s32 pcnt1 = 50; + s32 pcnt2 = 10; + s32 pcnt3 = 30; + s32 pcnt4 = 10; + s32 pcnt5 = 2; + s32 coeff = 1000; /* Precision, should avoid overflow */ + s32 hb_en, hb_en_ckln, pd_ckln, pd; + s32 val, val_ckln; + s32 temp; + + if (!bit_rate || !esc_rate) + return -EINVAL; + + timing->hs_halfbyte_en = 0; + hb_en = 0; + timing->hs_halfbyte_en_ckln = 0; + hb_en_ckln = 0; + timing->hs_prep_dly_ckln = 
(bit_rate > 100000000) ? 0 : 3; + pd_ckln = timing->hs_prep_dly_ckln; + timing->hs_prep_dly = (bit_rate > 120000000) ? 0 : 1; + pd = timing->hs_prep_dly; + + val = (hb_en << 2) + (pd << 1); + val_ckln = (hb_en_ckln << 2) + (pd_ckln << 1); + + ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000); + ui_x8 = ui << 3; + lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000); + + temp = S_DIV_ROUND_UP(38 * coeff - val_ckln * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (95 * coeff - val_ckln * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, false); + + temp = 300 * coeff - ((timing->clk_prepare << 3) + val_ckln) * ui; + tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3; + tmax = (tmin > 255) ? 511 : 255; + timing->clk_zero = linear_inter(tmax, tmin, pcnt5, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui - val * ui, ui_x8); + tmin = max_t(s32, temp, 0); + temp = (85 * coeff + 6 * ui - val * ui) / ui_x8; + tmax = max_t(s32, temp, 0); + timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, false); + + temp = 145 * coeff + 10 * ui - ((timing->hs_prepare << 3) + val) * ui; + tmin = S_DIV_ROUND_UP(temp - 11 * ui, ui_x8) - 3; + tmax = 255; + timing->hs_zero = linear_inter(tmax, tmin, pcnt4, 0, false); + + tmin = DIV_ROUND_UP(60 * coeff + 4 * ui + 3 * ui, ui_x8); + temp = 105 * coeff + 12 * ui - 20 * coeff; + tmax = (temp + 3 * ui) / ui_x8; + timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, false); + + temp = 50 * coeff + ((hb_en << 2) - 8) * ui; + timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8); + + tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1; + tmax = 255; + timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 50 * coeff + ((hb_en_ckln << 2) - 8) * ui; + timing->hs_rqst_ckln = S_DIV_ROUND_UP(temp, ui_x8); + + temp = 60 * coeff + 52 * ui - 43 * ui; + tmin = DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + timing->shared_timings.clk_post = + linear_inter(tmax, tmin, pcnt2, 0, false); + + temp = 8 * ui + ((timing->clk_prepare << 3) + val_ckln) * ui; + temp += (((timing->clk_zero + 3) << 3) + 11 - (pd_ckln << 1)) * ui; + temp += hb_en_ckln ? 
(((timing->hs_rqst_ckln << 3) + 4) * ui) : + (((timing->hs_rqst_ckln << 3) + 8) * ui); + tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1; + tmax = 63; + if (tmin > tmax) { + temp = linear_inter(tmax << 1, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre = temp >> 1; + timing->shared_timings.clk_pre_inc_by_2 = 1; + } else { + timing->shared_timings.clk_pre = + linear_inter(tmax, tmin, pcnt2, 0, false); + timing->shared_timings.clk_pre_inc_by_2 = 0; + } + + timing->ta_go = 3; + timing->ta_sure = 0; + timing->ta_get = 4; + + DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d", + timing->shared_timings.clk_pre, timing->shared_timings.clk_post, + timing->shared_timings.clk_pre_inc_by_2, timing->clk_zero, + timing->clk_trail, timing->clk_prepare, timing->hs_exit, + timing->hs_zero, timing->hs_prepare, timing->hs_trail, + timing->hs_rqst, timing->hs_rqst_ckln, timing->hs_halfbyte_en, + timing->hs_halfbyte_en_ckln, timing->hs_prep_dly, + timing->hs_prep_dly_ckln); + + return 0; +} + void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask) { @@ -268,6 +391,10 @@ static const struct of_device_id dsi_phy_dt_match[] = { { .compatible = "qcom,dsi-phy-28nm-8960", .data = &dsi_phy_28nm_8960_cfgs }, #endif +#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY + { .compatible = "qcom,dsi-phy-14nm", + .data = &dsi_phy_14nm_cfgs }, +#endif {} }; @@ -295,6 +422,24 @@ static int dsi_phy_get_id(struct msm_dsi_phy *phy) return -EINVAL; } +int msm_dsi_phy_init_common(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + int ret = 0; + + phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", + "DSI_PHY_REG"); + if (IS_ERR(phy->reg_base)) { + dev_err(&pdev->dev, "%s: failed to map phy regulator base\n", + __func__); + ret = -ENOMEM; + goto fail; + } + +fail: + return ret; +} + static int dsi_phy_driver_probe(struct platform_device *pdev) { struct msm_dsi_phy *phy; @@ -331,15 +476,6 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) goto fail; } - phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", - "DSI_PHY_REG"); - if (IS_ERR(phy->reg_base)) { - dev_err(dev, "%s: failed to map phy regulator base\n", - __func__); - ret = -ENOMEM; - goto fail; - } - ret = dsi_phy_regulator_init(phy); if (ret) { dev_err(dev, "%s: failed to init regulator\n", __func__); @@ -353,6 +489,12 @@ static int dsi_phy_driver_probe(struct platform_device *pdev) goto fail; } + if (phy->cfg->ops.init) { + ret = phy->cfg->ops.init(phy); + if (ret) + goto fail; + } + /* PLL init will call into clk_register which requires * register access, so we need to enable power and ahb clock. 
*/ @@ -410,7 +552,7 @@ void __exit msm_dsi_phy_driver_unregister(void) } int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate) + struct msm_dsi_phy_clk_request *clk_req) { struct device *dev = &phy->pdev->dev; int ret; @@ -418,21 +560,52 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, if (!phy || !phy->cfg->ops.enable) return -EINVAL; + ret = dsi_phy_enable_resource(phy); + if (ret) { + dev_err(dev, "%s: resource enable failed, %d\n", + __func__, ret); + goto res_en_fail; + } + ret = dsi_phy_regulator_enable(phy); if (ret) { dev_err(dev, "%s: regulator enable failed, %d\n", __func__, ret); - return ret; + goto reg_en_fail; } - ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate); + ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req); if (ret) { dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret); - dsi_phy_regulator_disable(phy); - return ret; + goto phy_en_fail; + } + + /* + * Resetting DSI PHY silently changes its PLL registers to reset status, + * which will confuse clock driver and result in wrong output rate of + * link clocks. Restore PLL status if its PLL is being used as clock + * source. + */ + if (phy->usecase != MSM_DSI_PHY_SLAVE) { + ret = msm_dsi_pll_restore_state(phy->pll); + if (ret) { + dev_err(dev, "%s: failed to restore pll state, %d\n", + __func__, ret); + goto pll_restor_fail; + } } return 0; + +pll_restor_fail: + if (phy->cfg->ops.disable) + phy->cfg->ops.disable(phy); +phy_en_fail: + dsi_phy_regulator_disable(phy); +reg_en_fail: + dsi_phy_disable_resource(phy); +res_en_fail: + return ret; } void msm_dsi_phy_disable(struct msm_dsi_phy *phy) @@ -440,21 +613,21 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy) if (!phy || !phy->cfg->ops.disable) return; + /* Save PLL status if it is a clock source */ + if (phy->usecase != MSM_DSI_PHY_SLAVE) + msm_dsi_pll_save_state(phy->pll); + phy->cfg->ops.disable(phy); dsi_phy_regulator_disable(phy); + dsi_phy_disable_resource(phy); } -void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy, - u32 *clk_pre, u32 *clk_post) +void msm_dsi_phy_get_shared_timings(struct msm_dsi_phy *phy, + struct msm_dsi_phy_shared_timings *shared_timings) { - if (!phy) - return; - - if (clk_pre) - *clk_pre = phy->timing.clk_pre; - if (clk_post) - *clk_post = phy->timing.clk_post; + memcpy(shared_timings, &phy->timing.shared_timings, + sizeof(*shared_timings)); } struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy) @@ -465,3 +638,9 @@ struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy) return phy->pll; } +void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy, + enum msm_dsi_phy_usecase uc) +{ + if (phy) + phy->usecase = uc; +} diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h index f24a85439b94..1733f6608a09 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h @@ -22,8 +22,9 @@ #define dsi_phy_write(offset, data) msm_writel((data), (offset)) struct msm_dsi_phy_ops { + int (*init) (struct msm_dsi_phy *phy); int (*enable)(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate); + struct msm_dsi_phy_clk_request *clk_req); void (*disable)(struct msm_dsi_phy *phy); }; @@ -46,6 +47,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs; extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs; extern const struct msm_dsi_phy_cfg 
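
/*
 * With the hunk above, PLL state handling moves into the PHY layer:
 * msm_dsi_phy_enable() restores the PLL after the reset that silently
 * clears its registers, msm_dsi_phy_disable() saves it first, and both
 * steps are skipped for the slave PHY whose PLL is not a clock source.
 * Standalone model of that save/restore pairing (illustrative names):
 */
#include <stdio.h>

enum usecase_sk { STANDALONE_SK, MASTER_SK, SLAVE_SK };

struct phy_state_sk {
	enum usecase_sk uc;
	int pll_regs;   /* stand-in for live PLL register state */
	int saved;      /* stand-in for the software copy */
};

static void phy_disable_sk(struct phy_state_sk *p)
{
	if (p->uc != SLAVE_SK)
		p->saved = p->pll_regs;  /* msm_dsi_pll_save_state() */
	p->pll_regs = 0;                 /* hardware powers down */
}

static void phy_enable_sk(struct phy_state_sk *p)
{
	p->pll_regs = 0;                 /* PHY reset clobbers PLL regs */
	if (p->uc != SLAVE_SK)
		p->pll_regs = p->saved;  /* msm_dsi_pll_restore_state() */
}

int main(void)
{
	struct phy_state_sk p = { MASTER_SK, 42, 0 };

	phy_disable_sk(&p);
	phy_enable_sk(&p);
	printf("pll_regs after a disable/enable cycle: %d\n", p.pll_regs);
	return 0;
}
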
dsi_phy_28nm_8960_cfgs; +extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs; struct msm_dsi_dphy_timing { u32 clk_pre; @@ -61,12 +63,22 @@ struct msm_dsi_dphy_timing { u32 ta_go; u32 ta_sure; u32 ta_get; + + struct msm_dsi_phy_shared_timings shared_timings; + + /* For PHY v2 only */ + u32 hs_rqst_ckln; + u32 hs_prep_dly; + u32 hs_prep_dly_ckln; + u8 hs_halfbyte_en; + u8 hs_halfbyte_en_ckln; }; struct msm_dsi_phy { struct platform_device *pdev; void __iomem *base; void __iomem *reg_base; + void __iomem *lane_base; int id; struct clk *ahb_clk; @@ -75,6 +87,7 @@ struct msm_dsi_phy { struct msm_dsi_dphy_timing timing; const struct msm_dsi_phy_cfg *cfg; + enum msm_dsi_phy_usecase usecase; bool regulator_ldo_mode; struct msm_dsi_pll *pll; @@ -84,9 +97,12 @@ struct msm_dsi_phy { * PHY internal functions */ int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing, - const unsigned long bit_rate, const unsigned long esc_rate); + struct msm_dsi_phy_clk_request *clk_req); +int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing, + struct msm_dsi_phy_clk_request *clk_req); void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg, u32 bit_mask); +int msm_dsi_phy_init_common(struct msm_dsi_phy *phy); #endif /* __DSI_PHY_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c new file mode 100644 index 000000000000..513f4234adc1 --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "dsi_phy.h" +#include "dsi.xml.h" + +#define PHY_14NM_CKLN_IDX 4 + +static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy, + struct msm_dsi_dphy_timing *timing, + int lane_idx) +{ + void __iomem *base = phy->lane_base; + bool clk_ln = (lane_idx == PHY_14NM_CKLN_IDX); + u32 zero = clk_ln ? timing->clk_zero : timing->hs_zero; + u32 prepare = clk_ln ? timing->clk_prepare : timing->hs_prepare; + u32 trail = clk_ln ? timing->clk_trail : timing->hs_trail; + u32 rqst = clk_ln ? timing->hs_rqst_ckln : timing->hs_rqst; + u32 prep_dly = clk_ln ? timing->hs_prep_dly_ckln : timing->hs_prep_dly; + u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln : + timing->hs_halfbyte_en; + + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx), + DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx), + halfbyte_en ? 
DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) | + DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get)); + dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx), + DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0)); +} + +static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, + struct msm_dsi_phy_clk_request *clk_req) +{ + struct msm_dsi_dphy_timing *timing = &phy->timing; + u32 data; + int i; + int ret; + void __iomem *base = phy->base; + void __iomem *lane_base = phy->lane_base; + + if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) { + dev_err(&phy->pdev->dev, + "%s: D-PHY timing calculation failed\n", __func__); + return -EINVAL; + } + + data = 0x1c; + if (phy->usecase != MSM_DSI_PHY_STANDALONE) + data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32); + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data); + + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1); + + /* 4 data lanes + 1 clk lane configuration */ + for (i = 0; i < 5; i++) { + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i), + 0x1d); + + dsi_phy_write(lane_base + + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff); + dsi_phy_write(lane_base + + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i), + (i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06); + + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i), + (i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f); + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10); + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i), + 0); + dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i), + 0x88); + + dsi_14nm_dphy_set_timing(phy, timing, i); + } + + /* Make sure PLL is not start */ + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00); + + wmb(); /* make sure everything is written before reset and enable */ + + /* reset digital block */ + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80); + wmb(); /* ensure reset is asserted */ + udelay(100); + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00); + + msm_dsi_phy_set_src_pll(phy, src_pll_id, + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, + DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL); + + ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase); + if (ret) { + dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n", + __func__, ret); + return ret; + } + + /* Remove power down from PLL and all lanes */ + dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff); + + return 0; +} + +static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy) +{ + dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0); + dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0); + + /* ensure that the phy is completely disabled */ + wmb(); +} + +static int dsi_14nm_phy_init(struct msm_dsi_phy *phy) +{ + struct platform_device *pdev = phy->pdev; + + phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane", + "DSI_PHY_LANE"); + if (IS_ERR(phy->lane_base)) { + dev_err(&pdev->dev, "%s: failed to map phy lane base\n", + __func__); + return -ENOMEM; + } + + return 0; +} + +const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = { + .type = MSM_DSI_PHY_14NM, + .src_pll_truthtable = { {false, false}, {true, false} }, + .reg_cfg = { + .num = 1, + .regs = { + {"vcca", 17000, 32}, + }, + }, + .ops = { + .enable = dsi_14nm_phy_enable, + .disable = dsi_14nm_phy_disable, + .init = 
dsi_14nm_phy_init, + }, + .io_start = { 0x994400, 0x996400 }, + .num_dsi_phy = 2, +}; diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c index c757e2070cac..1ca6c69516f5 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c @@ -72,7 +72,7 @@ static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) } static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate) + struct msm_dsi_phy_clk_request *clk_req) { struct msm_dsi_dphy_timing *timing = &phy->timing; int i; @@ -81,7 +81,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, DBG(""); - if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + if (msm_dsi_dphy_timing_calc(timing, clk_req)) { dev_err(&phy->pdev->dev, "%s: D-PHY timing calculation failed\n", __func__); return -EINVAL; @@ -145,6 +145,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = { .ops = { .enable = dsi_20nm_phy_enable, .disable = dsi_20nm_phy_disable, + .init = msm_dsi_phy_init_common, }, .io_start = { 0xfd998300, 0xfd9a0300 }, .num_dsi_phy = 2, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c index 63d7fba31380..4972b52cbe44 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c @@ -67,7 +67,7 @@ static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable) } static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate) + struct msm_dsi_phy_clk_request *clk_req) { struct msm_dsi_dphy_timing *timing = &phy->timing; int i; @@ -75,7 +75,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, DBG(""); - if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + if (msm_dsi_dphy_timing_calc(timing, clk_req)) { dev_err(&phy->pdev->dev, "%s: D-PHY timing calculation failed\n", __func__); return -EINVAL; @@ -144,6 +144,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = { .ops = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, + .init = msm_dsi_phy_init_common, }, .io_start = { 0xfd922b00, 0xfd923100 }, .num_dsi_phy = 2, @@ -161,6 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = { .ops = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, + .init = msm_dsi_phy_init_common, }, .io_start = { 0x1a98500 }, .num_dsi_phy = 1, diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c index 7bdb9de54968..398004463498 100644 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c @@ -124,14 +124,14 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy) } static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id, - const unsigned long bit_rate, const unsigned long esc_rate) + struct msm_dsi_phy_clk_request *clk_req) { struct msm_dsi_dphy_timing *timing = &phy->timing; void __iomem *base = phy->base; DBG(""); - if (msm_dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) { + if (msm_dsi_dphy_timing_calc(timing, clk_req)) { dev_err(&phy->pdev->dev, "%s: D-PHY timing calculation failed\n", __func__); return -EINVAL; @@ -191,6 +191,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = { .ops = { .enable = dsi_28nm_phy_enable, .disable = dsi_28nm_phy_disable, + .init = 
msm_dsi_phy_init_common, }, .io_start = { 0x4700300, 0x5800300 }, .num_dsi_phy = 2, diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c index 5cd438f91afe..bc289f5c9078 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c @@ -140,6 +140,15 @@ int msm_dsi_pll_restore_state(struct msm_dsi_pll *pll) return 0; } +int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + if (pll->set_usecase) + return pll->set_usecase(pll, uc); + + return 0; +} + struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, enum msm_dsi_phy_type type, int id) { @@ -154,6 +163,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev, case MSM_DSI_PHY_28NM_8960: pll = msm_dsi_pll_28nm_8960_init(pdev, id); break; + case MSM_DSI_PHY_14NM: + pll = msm_dsi_pll_14nm_init(pdev, id); + break; default: pll = ERR_PTR(-ENXIO); break; diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h index 2cf1664723e8..f63e7ada74a8 100644 --- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h @@ -41,6 +41,8 @@ struct msm_dsi_pll { void (*destroy)(struct msm_dsi_pll *pll); void (*save_state)(struct msm_dsi_pll *pll); int (*restore_state)(struct msm_dsi_pll *pll); + int (*set_usecase)(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc); }; #define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw) @@ -104,5 +106,14 @@ static inline struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init( } #endif +#ifdef CONFIG_DRM_MSM_DSI_14NM_PHY +struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id); +#else +static inline struct msm_dsi_pll * +msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) +{ + return ERR_PTR(-ENODEV); +} +#endif #endif /* __DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c new file mode 100644 index 000000000000..fe15aa64086f --- /dev/null +++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c @@ -0,0 +1,1104 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> + +#include "dsi_pll.h" +#include "dsi.xml.h" + +/* + * DSI PLL 14nm - clock diagram (eg: DSI0): + * + * dsi0n1_postdiv_clk + * | + * | + * +----+ | +----+ + * dsi0vco_clk ---| n1 |--o--| /8 |-- dsi0pllbyte + * +----+ | +----+ + * | dsi0n1_postdivby2_clk + * | +----+ | + * o---| /2 |--o--|\ + * | +----+ | \ +----+ + * | | |--| n2 |-- dsi0pll + * o--------------| / +----+ + * |/ + */ + +#define POLL_MAX_READS 15 +#define POLL_TIMEOUT_US 1000 + +#define NUM_PROVIDED_CLKS 2 + +#define VCO_REF_CLK_RATE 19200000 +#define VCO_MIN_RATE 1300000000UL +#define VCO_MAX_RATE 2600000000UL + +#define DSI_BYTE_PLL_CLK 0 +#define DSI_PIXEL_PLL_CLK 1 + +#define DSI_PLL_DEFAULT_VCO_POSTDIV 1 + +struct dsi_pll_input { + u32 fref; /* reference clk */ + u32 fdata; /* bit clock rate */ + u32 dsiclk_sel; /* Mux configuration (see diagram) */ + u32 ssc_en; /* SSC enable/disable */ + u32 ldo_en; + + /* fixed params */ + u32 refclk_dbler_en; + u32 vco_measure_time; + u32 kvco_measure_time; + u32 bandgap_timer; + u32 pll_wakeup_timer; + u32 plllock_cnt; + u32 plllock_rng; + u32 ssc_center; + u32 ssc_adj_period; + u32 ssc_spread; + u32 ssc_freq; + u32 pll_ie_trim; + u32 pll_ip_trim; + u32 pll_iptat_trim; + u32 pll_cpcset_cur; + u32 pll_cpmset_cur; + + u32 pll_icpmset; + u32 pll_icpcset; + + u32 pll_icpmset_p; + u32 pll_icpmset_m; + + u32 pll_icpcset_p; + u32 pll_icpcset_m; + + u32 pll_lpf_res1; + u32 pll_lpf_cap1; + u32 pll_lpf_cap2; + u32 pll_c3ctrl; + u32 pll_r3ctrl; +}; + +struct dsi_pll_output { + u32 pll_txclk_en; + u32 dec_start; + u32 div_frac_start; + u32 ssc_period; + u32 ssc_step_size; + u32 plllock_cmp; + u32 pll_vco_div_ref; + u32 pll_vco_count; + u32 pll_kvco_div_ref; + u32 pll_kvco_count; + u32 pll_misc1; + u32 pll_lpf2_postdiv; + u32 pll_resetsm_cntrl; + u32 pll_resetsm_cntrl2; + u32 pll_resetsm_cntrl5; + u32 pll_kvco_code; + + u32 cmn_clk_cfg0; + u32 cmn_clk_cfg1; + u32 cmn_ldo_cntrl; + + u32 pll_postdiv; + u32 fcvo; +}; + +struct pll_14nm_cached_state { + unsigned long vco_rate; + u8 n2postdiv; + u8 n1postdiv; +}; + +struct dsi_pll_14nm { + struct msm_dsi_pll base; + + int id; + struct platform_device *pdev; + + void __iomem *phy_cmn_mmio; + void __iomem *mmio; + + int vco_delay; + + struct dsi_pll_input in; + struct dsi_pll_output out; + + /* protects REG_DSI_14nm_PHY_CMN_CLK_CFG0 register */ + spinlock_t postdiv_lock; + + u64 vco_current_rate; + u64 vco_ref_clk_rate; + + /* private clocks: */ + struct clk_hw *hws[NUM_DSI_CLOCKS_MAX]; + u32 num_hws; + + /* clock-provider: */ + struct clk_hw_onecell_data *hw_data; + + struct pll_14nm_cached_state cached_state; + + enum msm_dsi_phy_usecase uc; + struct dsi_pll_14nm *slave; +}; + +#define to_pll_14nm(x) container_of(x, struct dsi_pll_14nm, base) + +/* + * Private struct for N1/N2 post-divider clocks. These clocks are similar to + * the generic clk_divider class of clocks. The only difference is that it + * also sets the slave DSI PLL's post-dividers if in Dual DSI mode + */ +struct dsi_pll_14nm_postdiv { + struct clk_hw hw; + + /* divider params */ + u8 shift; + u8 width; + u8 flags; /* same flags as used by clk_divider struct */ + + struct dsi_pll_14nm *pll; +}; + +#define to_pll_14nm_postdiv(_hw) container_of(_hw, struct dsi_pll_14nm_postdiv, hw) + +/* + * Global list of private DSI PLL struct pointers. 
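
/*
 * The clock diagram above fixes the 14nm output tree: the byte clock is
 * VCO / N1 / 8 and, with dsiclk_sel forced to the /2 path, the pixel
 * clock is VCO / N1 / 2 / N2. A minimal numeric model of that tree:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t byteclk_sk(uint64_t vco, unsigned int n1)
{
	return vco / n1 / 8;            /* dsi0pllbyte */
}

static uint64_t pixclk_sk(uint64_t vco, unsigned int n1, unsigned int n2)
{
	return vco / n1 / 2 / n2;       /* dsi0pll through the /2 divider */
}

int main(void)
{
	uint64_t vco = 1500000000ULL;   /* inside the 1.3-2.6 GHz VCO range */

	printf("byte=%llu Hz pixel=%llu Hz\n",
	       (unsigned long long)byteclk_sk(vco, 2),
	       (unsigned long long)pixclk_sk(vco, 2, 5));
	return 0;
}
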
We need this for Dual DSI + * mode, where the master PLL's clk_ops needs access the slave's private data + */ +static struct dsi_pll_14nm *pll_14nm_list[DSI_MAX]; + +static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm, + u32 nb_tries, u32 timeout_us) +{ + bool pll_locked = false; + void __iomem *base = pll_14nm->mmio; + u32 tries, val; + + tries = nb_tries; + while (tries--) { + val = pll_read(base + + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_locked = !!(val & BIT(5)); + + if (pll_locked) + break; + + udelay(timeout_us); + } + + if (!pll_locked) { + tries = nb_tries; + while (tries--) { + val = pll_read(base + + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS); + pll_locked = !!(val & BIT(0)); + + if (pll_locked) + break; + + udelay(timeout_us); + } + } + + DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* "); + + return pll_locked; +} + +static void dsi_pll_14nm_input_init(struct dsi_pll_14nm *pll) +{ + pll->in.fref = pll->vco_ref_clk_rate; + pll->in.fdata = 0; + pll->in.dsiclk_sel = 1; /* Use the /2 path in Mux */ + pll->in.ldo_en = 0; /* disabled for now */ + + /* fixed input */ + pll->in.refclk_dbler_en = 0; + pll->in.vco_measure_time = 5; + pll->in.kvco_measure_time = 5; + pll->in.bandgap_timer = 4; + pll->in.pll_wakeup_timer = 5; + pll->in.plllock_cnt = 1; + pll->in.plllock_rng = 0; + + /* + * SSC is enabled by default. We might need DT props for configuring + * some SSC params like PPM and center/down spread etc. + */ + pll->in.ssc_en = 1; + pll->in.ssc_center = 0; /* down spread by default */ + pll->in.ssc_spread = 5; /* PPM / 1000 */ + pll->in.ssc_freq = 31500; /* default recommended */ + pll->in.ssc_adj_period = 37; + + pll->in.pll_ie_trim = 4; + pll->in.pll_ip_trim = 4; + pll->in.pll_cpcset_cur = 1; + pll->in.pll_cpmset_cur = 1; + pll->in.pll_icpmset = 4; + pll->in.pll_icpcset = 4; + pll->in.pll_icpmset_p = 0; + pll->in.pll_icpmset_m = 0; + pll->in.pll_icpcset_p = 0; + pll->in.pll_icpcset_m = 0; + pll->in.pll_lpf_res1 = 3; + pll->in.pll_lpf_cap1 = 11; + pll->in.pll_lpf_cap2 = 1; + pll->in.pll_iptat_trim = 7; + pll->in.pll_c3ctrl = 2; + pll->in.pll_r3ctrl = 1; +} + +#define CEIL(x, y) (((x) + ((y) - 1)) / (y)) + +static void pll_14nm_ssc_calc(struct dsi_pll_14nm *pll) +{ + u32 period, ssc_period; + u32 ref, rem; + u64 step_size; + + DBG("vco=%lld ref=%lld", pll->vco_current_rate, pll->vco_ref_clk_rate); + + ssc_period = pll->in.ssc_freq / 500; + period = (u32)pll->vco_ref_clk_rate / 1000; + ssc_period = CEIL(period, ssc_period); + ssc_period -= 1; + pll->out.ssc_period = ssc_period; + + DBG("ssc freq=%d spread=%d period=%d", pll->in.ssc_freq, + pll->in.ssc_spread, pll->out.ssc_period); + + step_size = (u32)pll->vco_current_rate; + ref = pll->vco_ref_clk_rate; + ref /= 1000; + step_size = div_u64(step_size, ref); + step_size <<= 20; + step_size = div_u64(step_size, 1000); + step_size *= pll->in.ssc_spread; + step_size = div_u64(step_size, 1000); + step_size *= (pll->in.ssc_adj_period + 1); + + rem = 0; + step_size = div_u64_rem(step_size, ssc_period + 1, &rem); + if (rem) + step_size++; + + DBG("step_size=%lld", step_size); + + step_size &= 0x0ffff; /* take lower 16 bits */ + + pll->out.ssc_step_size = step_size; +} + +static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll) +{ + struct dsi_pll_input *pin = &pll->in; + struct dsi_pll_output *pout = &pll->out; + u64 multiplier = BIT(20); + u64 dec_start_multiple, dec_start, pll_comp_val; + u32 duration, div_frac_start; + u64 vco_clk_rate = pll->vco_current_rate; + u64 fref = pll->vco_ref_clk_rate; + + 
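
/*
 * pll_14nm_poll_for_ready() above polls RESET_SM_READY_STATUS bit 5 with
 * retries and only then falls back to polling bit 0. Standalone model
 * with a stubbed register read in place of the MMIO access and delay
 * (illustrative names):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_status_sk; /* stand-in for the MMIO status read */

static bool poll_bit_sk(uint32_t bit, unsigned int tries)
{
	while (tries--)
		if (fake_status_sk & bit) /* real code delays between reads */
			return true;
	return false;
}

static bool poll_for_ready_sk(unsigned int tries)
{
	if (poll_bit_sk(1u << 5, tries))       /* preferred ready bit */
		return true;
	return poll_bit_sk(1u << 0, tries);    /* fallback lock bit */
}

int main(void)
{
	fake_status_sk = 1u << 0;
	printf("locked=%d\n", poll_for_ready_sk(15));
	return 0;
}
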
DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref); + + dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref); + div_u64_rem(dec_start_multiple, multiplier, &div_frac_start); + + dec_start = div_u64(dec_start_multiple, multiplier); + + pout->dec_start = (u32)dec_start; + pout->div_frac_start = div_frac_start; + + if (pin->plllock_cnt == 0) + duration = 1024; + else if (pin->plllock_cnt == 1) + duration = 256; + else if (pin->plllock_cnt == 2) + duration = 128; + else + duration = 32; + + pll_comp_val = duration * dec_start_multiple; + pll_comp_val = div_u64(pll_comp_val, multiplier); + do_div(pll_comp_val, 10); + + pout->plllock_cmp = (u32)pll_comp_val; + + pout->pll_txclk_en = 1; + pout->cmn_ldo_cntrl = 0x3c; +} + +static u32 pll_14nm_kvco_slop(u32 vrate) +{ + u32 slop = 0; + + if (vrate > VCO_MIN_RATE && vrate <= 1800000000UL) + slop = 600; + else if (vrate > 1800000000UL && vrate < 2300000000UL) + slop = 400; + else if (vrate > 2300000000UL && vrate < VCO_MAX_RATE) + slop = 280; + + return slop; +} + +static void pll_14nm_calc_vco_count(struct dsi_pll_14nm *pll) +{ + struct dsi_pll_input *pin = &pll->in; + struct dsi_pll_output *pout = &pll->out; + u64 vco_clk_rate = pll->vco_current_rate; + u64 fref = pll->vco_ref_clk_rate; + u64 data; + u32 cnt; + + data = fref * pin->vco_measure_time; + do_div(data, 1000000); + data &= 0x03ff; /* 10 bits */ + data -= 2; + pout->pll_vco_div_ref = data; + + data = div_u64(vco_clk_rate, 1000000); /* unit is Mhz */ + data *= pin->vco_measure_time; + do_div(data, 10); + pout->pll_vco_count = data; + + data = fref * pin->kvco_measure_time; + do_div(data, 1000000); + data &= 0x03ff; /* 10 bits */ + data -= 1; + pout->pll_kvco_div_ref = data; + + cnt = pll_14nm_kvco_slop(vco_clk_rate); + cnt *= 2; + cnt /= 100; + cnt *= pin->kvco_measure_time; + pout->pll_kvco_count = cnt; + + pout->pll_misc1 = 16; + pout->pll_resetsm_cntrl = 48; + pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3; + pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer; + pout->pll_kvco_code = 0; +} + +static void pll_db_commit_ssc(struct dsi_pll_14nm *pll) +{ + void __iomem *base = pll->mmio; + struct dsi_pll_input *pin = &pll->in; + struct dsi_pll_output *pout = &pll->out; + u8 data; + + data = pin->ssc_adj_period; + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data); + data = (pin->ssc_adj_period >> 8); + data &= 0x03; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data); + + data = pout->ssc_period; + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data); + data = (pout->ssc_period >> 8); + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data); + + data = pout->ssc_step_size; + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data); + data = (pout->ssc_step_size >> 8); + data &= 0x0ff; + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data); + + data = (pin->ssc_center & 0x01); + data <<= 1; + data |= 0x01; /* enable */ + pll_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data); + + wmb(); /* make sure register committed */ +} + +static void pll_db_commit_common(struct dsi_pll_14nm *pll, + struct dsi_pll_input *pin, + struct dsi_pll_output *pout) +{ + void __iomem *base = pll->mmio; + u8 data; + + /* confgiure the non frequency dependent pll registers */ + data = 0; + pll_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data); + + data = pout->pll_txclk_en; + pll_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, data); + + data = pout->pll_resetsm_cntrl; + pll_write(base + 
REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, data); + data = pout->pll_resetsm_cntrl2; + pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, data); + data = pout->pll_resetsm_cntrl5; + pll_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, data); + + data = pout->pll_vco_div_ref & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data); + data = (pout->pll_vco_div_ref >> 8) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data); + + data = pout->pll_kvco_div_ref & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data); + data = (pout->pll_kvco_div_ref >> 8) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data); + + data = pout->pll_misc1; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, data); + + data = pin->pll_ie_trim; + pll_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, data); + + data = pin->pll_ip_trim; + pll_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, data); + + data = pin->pll_cpmset_cur << 3 | pin->pll_cpcset_cur; + pll_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, data); + + data = pin->pll_icpcset_p << 3 | pin->pll_icpcset_m; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, data); + + data = pin->pll_icpmset_p << 3 | pin->pll_icpcset_m; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, data); + + data = pin->pll_icpmset << 3 | pin->pll_icpcset; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, data); + + data = pin->pll_lpf_cap2 << 4 | pin->pll_lpf_cap1; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, data); + + data = pin->pll_iptat_trim; + pll_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, data); + + data = pin->pll_c3ctrl | pin->pll_r3ctrl << 4; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, data); +} + +static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm) +{ + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + + /* de assert pll start and apply pll sw reset */ + + /* stop pll */ + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0); + + /* pll sw reset */ + pll_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10); + wmb(); /* make sure register committed */ + + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0); + wmb(); /* make sure register committed */ +} + +static void pll_db_commit_14nm(struct dsi_pll_14nm *pll, + struct dsi_pll_input *pin, + struct dsi_pll_output *pout) +{ + void __iomem *base = pll->mmio; + void __iomem *cmn_base = pll->phy_cmn_mmio; + u8 data; + + DBG("DSI%d PLL", pll->id); + + data = pout->cmn_ldo_cntrl; + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data); + + pll_db_commit_common(pll, pin, pout); + + pll_14nm_software_reset(pll); + + data = pin->dsiclk_sel; /* set dsiclk_sel = 1 */ + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, data); + + data = 0xff; /* data, clk, pll normal operation */ + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data); + + /* configure the frequency dependent pll registers */ + data = pout->dec_start; + pll_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data); + + data = pout->div_frac_start & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data); + data = (pout->div_frac_start >> 8) & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data); + data = (pout->div_frac_start >> 16) & 0xf; + pll_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data); + + data = pout->plllock_cmp & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data); + + data = (pout->plllock_cmp >> 8) & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data); + + data = (pout->plllock_cmp >> 16) & 0x3; + 
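
/*
 * The commit helpers above scatter wide values across byte-sized PLL
 * registers, e.g. the 20-bit div_frac_start into DIV_FRAC_START1/2/3.
 * Standalone model of the split and its reassembly:
 */
#include <stdio.h>

int main(void)
{
	unsigned int div_frac_start = 0x5a5a5;            /* 20-bit value */
	unsigned int r1 = div_frac_start & 0xff;          /* START1 */
	unsigned int r2 = (div_frac_start >> 8) & 0xff;   /* START2 */
	unsigned int r3 = (div_frac_start >> 16) & 0xf;   /* START3 */
	unsigned int back = (r3 << 16) | (r2 << 8) | r1;

	printf("split %05x -> %02x %02x %01x -> %05x\n",
	       div_frac_start, r1, r2, r3, back);
	return 0;
}
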
pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data); + + data = pin->plllock_cnt << 1 | pin->plllock_rng << 3; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data); + + data = pout->pll_vco_count & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data); + data = (pout->pll_vco_count >> 8) & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data); + + data = pout->pll_kvco_count & 0xff; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data); + data = (pout->pll_kvco_count >> 8) & 0x3; + pll_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data); + + data = (pout->pll_postdiv - 1) << 4 | pin->pll_lpf_res1; + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, data); + + if (pin->ssc_en) + pll_db_commit_ssc(pll); + + wmb(); /* make sure register committed */ +} + +/* + * VCO clock Callbacks + */ +static int dsi_pll_14nm_vco_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct dsi_pll_input *pin = &pll_14nm->in; + struct dsi_pll_output *pout = &pll_14nm->out; + + DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_14nm->id, rate, + parent_rate); + + pll_14nm->vco_current_rate = rate; + pll_14nm->vco_ref_clk_rate = VCO_REF_CLK_RATE; + + dsi_pll_14nm_input_init(pll_14nm); + + /* + * This configures the post divider internal to the VCO. It's + * fixed to divide by 1 for now. + * + * tx_band = pll_postdiv. + * 0: divided by 1 + * 1: divided by 2 + * 2: divided by 4 + * 3: divided by 8 + */ + pout->pll_postdiv = DSI_PLL_DEFAULT_VCO_POSTDIV; + + pll_14nm_dec_frac_calc(pll_14nm); + + if (pin->ssc_en) + pll_14nm_ssc_calc(pll_14nm); + + pll_14nm_calc_vco_count(pll_14nm); + + /* commit the slave DSI PLL registers if we're master. Note that we + * don't lock the slave PLL. We just ensure that the PLL/PHY registers + * of the master and slave are identical + */ + if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + + pll_db_commit_14nm(pll_14nm_slave, pin, pout); + } + + pll_db_commit_14nm(pll_14nm, pin, pout); + + return 0; +} + +static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct msm_dsi_pll *pll = hw_clk_to_pll(hw); + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *base = pll_14nm->mmio; + u64 vco_rate, multiplier = BIT(20); + u32 div_frac_start; + u32 dec_start; + u64 ref_clk = parent_rate; + + dec_start = pll_read(base + REG_DSI_14nm_PHY_PLL_DEC_START); + dec_start &= 0x0ff; + + DBG("dec_start = %x", dec_start); + + div_frac_start = (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3) + & 0xf) << 16; + div_frac_start |= (pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2) + & 0xff) << 8; + div_frac_start |= pll_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1) + & 0xff; + + DBG("div_frac_start = %x", div_frac_start); + + vco_rate = ref_clk * dec_start; + + vco_rate += ((ref_clk * div_frac_start) / multiplier); + + /* + * Recalculating the rate from dec_start and frac_start doesn't end up + * the rate we originally set. Convert the freq to KHz, round it up and + * convert it back to MHz. 
+ */ + vco_rate = DIV_ROUND_UP_ULL(vco_rate, 1000) * 1000; + + DBG("returning vco rate = %lu", (unsigned long)vco_rate); + + return (unsigned long)vco_rate; +} + +static const struct clk_ops clk_ops_dsi_pll_14nm_vco = { + .round_rate = msm_dsi_pll_helper_clk_round_rate, + .set_rate = dsi_pll_14nm_vco_set_rate, + .recalc_rate = dsi_pll_14nm_vco_recalc_rate, + .prepare = msm_dsi_pll_helper_clk_prepare, + .unprepare = msm_dsi_pll_helper_clk_unprepare, +}; + +/* + * N1 and N2 post-divider clock callbacks + */ +#define div_mask(width) ((1 << (width)) - 1) +static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + void __iomem *base = pll_14nm->phy_cmn_mmio; + u8 shift = postdiv->shift; + u8 width = postdiv->width; + u32 val; + + DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, parent_rate); + + val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift; + val &= div_mask(width); + + return divider_recalc_rate(hw, parent_rate, val, NULL, + postdiv->flags); +} + +static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long *prate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + + DBG("DSI%d PLL parent rate=%lu", pll_14nm->id, rate); + + return divider_round_rate(hw, rate, prate, NULL, + postdiv->width, + postdiv->flags); +} + +static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw); + struct dsi_pll_14nm *pll_14nm = postdiv->pll; + void __iomem *base = pll_14nm->phy_cmn_mmio; + spinlock_t *lock = &pll_14nm->postdiv_lock; + u8 shift = postdiv->shift; + u8 width = postdiv->width; + unsigned int value; + unsigned long flags = 0; + u32 val; + + DBG("DSI%d PLL parent rate=%lu parent rate %lu", pll_14nm->id, rate, + parent_rate); + + value = divider_get_val(rate, parent_rate, NULL, postdiv->width, + postdiv->flags); + + spin_lock_irqsave(lock, flags); + + val = pll_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + val &= ~(div_mask(width) << shift); + + val |= value << shift; + pll_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); + + /* If we're master in dual DSI mode, then the slave PLL's post-dividers + * follow the master's post dividers + */ + if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio; + + pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val); + } + + spin_unlock_irqrestore(lock, flags); + + return 0; +} + +static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = { + .recalc_rate = dsi_pll_14nm_postdiv_recalc_rate, + .round_rate = dsi_pll_14nm_postdiv_round_rate, + .set_rate = dsi_pll_14nm_postdiv_set_rate, +}; + +/* + * PLL Callbacks + */ + +static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *base = pll_14nm->mmio; + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + bool locked; + + DBG(""); + + pll_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10); + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1); + + locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS, + POLL_TIMEOUT_US); + + if (unlikely(!locked)) + dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n"); + else + DBG("DSI PLL lock success"); + 
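
/*
 * dsi_pll_14nm_vco_recalc_rate() above rebuilds the VCO rate as
 * ref * dec_start + ref * div_frac_start / 2^20 and rounds up to a whole
 * kHz so it matches the rate that was originally requested. Worked
 * standalone example of that arithmetic:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ref = 19200000;            /* VCO_REF_CLK_RATE */
	uint32_t dec_start = 78;            /* integer divider part */
	uint32_t div_frac_start = 0x20000;  /* 0.125 in 20-bit fixed point */
	uint64_t vco = ref * dec_start + ((ref * div_frac_start) >> 20);

	vco = (vco + 999) / 1000 * 1000;    /* DIV_ROUND_UP_ULL(vco, 1000) * 1000 */
	printf("vco=%llu Hz\n", (unsigned long long)vco); /* 1500000000 */
	return 0;
}
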
+ return locked ? 0 : -EINVAL; +} + +static void dsi_pll_14nm_disable_seq(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + + DBG(""); + + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0); +} + +static void dsi_pll_14nm_save_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + u32 data; + + data = pll_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0); + + cached_state->n1postdiv = data & 0xf; + cached_state->n2postdiv = (data >> 4) & 0xf; + + DBG("DSI%d PLL save state %x %x", pll_14nm->id, + cached_state->n1postdiv, cached_state->n2postdiv); + + cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw); +} + +static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct pll_14nm_cached_state *cached_state = &pll_14nm->cached_state; + void __iomem *cmn_base = pll_14nm->phy_cmn_mmio; + u32 data; + int ret; + + ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw, + cached_state->vco_rate, 0); + if (ret) { + dev_err(&pll_14nm->pdev->dev, + "restore vco rate failed. ret=%d\n", ret); + return ret; + } + + data = cached_state->n1postdiv | (cached_state->n2postdiv << 4); + + DBG("DSI%d PLL restore state %x %x", pll_14nm->id, + cached_state->n1postdiv, cached_state->n2postdiv); + + pll_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data); + + /* also restore post-dividers for slave DSI PLL */ + if (pll_14nm->uc == MSM_DSI_PHY_MASTER) { + struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave; + void __iomem *slave_base = pll_14nm_slave->phy_cmn_mmio; + + pll_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data); + } + + return 0; +} + +static int dsi_pll_14nm_set_usecase(struct msm_dsi_pll *pll, + enum msm_dsi_phy_usecase uc) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + void __iomem *base = pll_14nm->mmio; + u32 clkbuflr_en, bandgap = 0; + + switch (uc) { + case MSM_DSI_PHY_STANDALONE: + clkbuflr_en = 0x1; + break; + case MSM_DSI_PHY_MASTER: + clkbuflr_en = 0x3; + pll_14nm->slave = pll_14nm_list[(pll_14nm->id + 1) % DSI_MAX]; + break; + case MSM_DSI_PHY_SLAVE: + clkbuflr_en = 0x0; + bandgap = 0x3; + break; + default: + return -EINVAL; + } + + pll_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en); + if (bandgap) + pll_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap); + + pll_14nm->uc = uc; + + return 0; +} + +static int dsi_pll_14nm_get_provider(struct msm_dsi_pll *pll, + struct clk **byte_clk_provider, + struct clk **pixel_clk_provider) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct clk_hw_onecell_data *hw_data = pll_14nm->hw_data; + + if (byte_clk_provider) + *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk; + if (pixel_clk_provider) + *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk; + + return 0; +} + +static void dsi_pll_14nm_destroy(struct msm_dsi_pll *pll) +{ + struct dsi_pll_14nm *pll_14nm = to_pll_14nm(pll); + struct platform_device *pdev = pll_14nm->pdev; + int num_hws = pll_14nm->num_hws; + + of_clk_del_provider(pdev->dev.of_node); + + while (num_hws--) + clk_hw_unregister(pll_14nm->hws[num_hws]); +} + +static struct clk_hw *pll_14nm_postdiv_register(struct dsi_pll_14nm *pll_14nm, + const char *name, + const char *parent_name, + unsigned long flags, + u8 shift) +{ + struct dsi_pll_14nm_postdiv *pll_postdiv; + struct device *dev = 
&pll_14nm->pdev->dev; + struct clk_init_data postdiv_init = { + .parent_names = (const char *[]) { parent_name }, + .num_parents = 1, + .name = name, + .flags = flags, + .ops = &clk_ops_dsi_pll_14nm_postdiv, + }; + int ret; + + pll_postdiv = devm_kzalloc(dev, sizeof(*pll_postdiv), GFP_KERNEL); + if (!pll_postdiv) + return ERR_PTR(-ENOMEM); + + pll_postdiv->pll = pll_14nm; + pll_postdiv->shift = shift; + /* both N1 and N2 postdividers are 4 bits wide */ + pll_postdiv->width = 4; + /* range of each divider is from 1 to 15 */ + pll_postdiv->flags = CLK_DIVIDER_ONE_BASED; + pll_postdiv->hw.init = &postdiv_init; + + ret = clk_hw_register(dev, &pll_postdiv->hw); + if (ret) + return ERR_PTR(ret); + + return &pll_postdiv->hw; +} + +static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm) +{ + char clk_name[32], parent[32], vco_name[32]; + struct clk_init_data vco_init = { + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .name = vco_name, + .flags = CLK_IGNORE_UNUSED, + .ops = &clk_ops_dsi_pll_14nm_vco, + }; + struct device *dev = &pll_14nm->pdev->dev; + struct clk_hw **hws = pll_14nm->hws; + struct clk_hw_onecell_data *hw_data; + struct clk_hw *hw; + int num = 0; + int ret; + + DBG("DSI%d", pll_14nm->id); + + hw_data = devm_kzalloc(dev, sizeof(*hw_data) + + NUM_PROVIDED_CLKS * sizeof(struct clk_hw *), + GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + snprintf(vco_name, 32, "dsi%dvco_clk", pll_14nm->id); + pll_14nm->base.clk_hw.init = &vco_init; + + ret = clk_hw_register(dev, &pll_14nm->base.clk_hw); + if (ret) + return ret; + + hws[num++] = &pll_14nm->base.clk_hw; + + snprintf(clk_name, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); + snprintf(parent, 32, "dsi%dvco_clk", pll_14nm->id); + + /* N1 postdiv, bits 0-3 in REG_DSI_14nm_PHY_CMN_CLK_CFG0 */ + hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, + CLK_SET_RATE_PARENT, 0); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpllbyte", pll_14nm->id); + snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); + + /* DSI Byte clock = VCO_CLK / N1 / 8 */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, + CLK_SET_RATE_PARENT, 1, 8); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_BYTE_PLL_CLK] = hw; + + snprintf(clk_name, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id); + snprintf(parent, 32, "dsi%dn1_postdiv_clk", pll_14nm->id); + + /* + * Skip the mux for now, force DSICLK_SEL to 1, Add a /2 divider + * on the way. Don't let it set parent. + */ + hw = clk_hw_register_fixed_factor(dev, clk_name, parent, 0, 1, 2); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + + snprintf(clk_name, 32, "dsi%dpll", pll_14nm->id); + snprintf(parent, 32, "dsi%dn1_postdivby2_clk", pll_14nm->id); + + /* DSI pixel clock = VCO_CLK / N1 / 2 / N2 + * This is the output of N2 post-divider, bits 4-7 in + * REG_DSI_14nm_PHY_CMN_CLK_CFG0. Don't let it set parent. 
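
The two comments above pin down the 14nm PLL output tree: byte clock = VCO / N1 / 8 and pixel clock = VCO / N1 / 2 / N2, with N1 in bits 0-3 and N2 in bits 4-7 of REG_DSI_14nm_PHY_CMN_CLK_CFG0 (see dsi_pll_14nm_save_state earlier). Both post-dividers are registered CLK_DIVIDER_ONE_BASED, so the raw 4-bit field value is the divide ratio itself (1..15, 0 invalid). A minimal userspace model of that arithmetic, with a made-up VCO rate and divider settings:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the 14nm DSI PLL output tree described above:
 *   byte clock  = VCO / N1 / 8
 *   pixel clock = VCO / N1 / 2 / N2
 * N1/N2 are one-based 4-bit fields in CLK_CFG0, so the raw
 * register value is the divide ratio itself.
 */
static void model_14nm_outputs(uint64_t vco_hz, uint32_t clk_cfg0)
{
	uint32_t n1 = clk_cfg0 & 0xf;        /* bits 0-3 */
	uint32_t n2 = (clk_cfg0 >> 4) & 0xf; /* bits 4-7 */

	assert(n1 && n2); /* one-based: zero would be an invalid setting */

	printf("byte:  %llu Hz\n", (unsigned long long)(vco_hz / n1 / 8));
	printf("pixel: %llu Hz\n", (unsigned long long)(vco_hz / n1 / 2 / n2));
}

int main(void)
{
	/* hypothetical example: 1.5 GHz VCO, N1 = 2, N2 = 3 */
	model_14nm_outputs(1500000000ULL, (3 << 4) | 2);
	return 0;
}
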
+ */ + hw = pll_14nm_postdiv_register(pll_14nm, clk_name, parent, 0, 4); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + hws[num++] = hw; + hw_data->hws[DSI_PIXEL_PLL_CLK] = hw; + + pll_14nm->num_hws = num; + + hw_data->num = NUM_PROVIDED_CLKS; + pll_14nm->hw_data = hw_data; + + ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get, + pll_14nm->hw_data); + if (ret) { + dev_err(dev, "failed to register clk provider: %d\n", ret); + return ret; + } + + return 0; +} + +struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id) +{ + struct dsi_pll_14nm *pll_14nm; + struct msm_dsi_pll *pll; + int ret; + + if (!pdev) + return ERR_PTR(-ENODEV); + + pll_14nm = devm_kzalloc(&pdev->dev, sizeof(*pll_14nm), GFP_KERNEL); + if (!pll_14nm) + return ERR_PTR(-ENOMEM); + + DBG("PLL%d", id); + + pll_14nm->pdev = pdev; + pll_14nm->id = id; + pll_14nm_list[id] = pll_14nm; + + pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY"); + if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) { + dev_err(&pdev->dev, "failed to map CMN PHY base\n"); + return ERR_PTR(-ENOMEM); + } + + pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL"); + if (IS_ERR_OR_NULL(pll_14nm->mmio)) { + dev_err(&pdev->dev, "failed to map PLL base\n"); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&pll_14nm->postdiv_lock); + + pll = &pll_14nm->base; + pll->min_rate = VCO_MIN_RATE; + pll->max_rate = VCO_MAX_RATE; + pll->get_provider = dsi_pll_14nm_get_provider; + pll->destroy = dsi_pll_14nm_destroy; + pll->disable_seq = dsi_pll_14nm_disable_seq; + pll->save_state = dsi_pll_14nm_save_state; + pll->restore_state = dsi_pll_14nm_restore_state; + pll->set_usecase = dsi_pll_14nm_set_usecase; + + pll_14nm->vco_delay = 1; + + pll->en_seq_cnt = 1; + pll->enable_seqs[0] = dsi_pll_14nm_enable_seq; + + ret = pll_14nm_register(pll_14nm); + if (ret) { + dev_err(&pdev->dev, "failed to register PLL: %d\n", ret); + return ERR_PTR(ret); + } + + return pll; +} diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index b782efd4b95f..94ea963519b2 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -260,8 +260,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, struct drm_encoder *encoder; struct drm_connector *connector; struct device_node *panel_node; - struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM]; - int i, dsi_id; + int dsi_id; int ret; switch (intf_type) { @@ -322,22 +321,19 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, if (!priv->dsi[dsi_id]) break; - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { - dsi_encs[i] = mdp4_dsi_encoder_init(dev); - if (IS_ERR(dsi_encs[i])) { - ret = PTR_ERR(dsi_encs[i]); - dev_err(dev->dev, - "failed to construct DSI encoder: %d\n", - ret); - return ret; - } - - /* TODO: Add DMA_S later? */ - dsi_encs[i]->possible_crtcs = 1 << DMA_P; - priv->encoders[priv->num_encoders++] = dsi_encs[i]; + encoder = mdp4_dsi_encoder_init(dev); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + dev_err(dev->dev, + "failed to construct DSI encoder: %d\n", ret); + return ret; } - ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs); + /* TODO: Add DMA_S later? 
*/ + encoder->possible_crtcs = 1 << DMA_P; + priv->encoders[priv->num_encoders++] = encoder; + + ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); if (ret) { dev_err(dev->dev, "failed to initialize DSI: %d\n", ret); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h index 27d5371acee0..e6dfc518d4db 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h @@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/ git clone https://github.com/freedreno/envytools.git The rules-ng-ng source files this header was generated from are: -- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) -- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-11-26 23:01:08) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02) -- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07) -- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18) -- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14) - -Copyright (C) 2013-2016 by the following authors: +- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-01-11 05:19:19) +- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54) +- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55) + +Copyright (C) 2013-2017 by the following authors: - Rob Clark <robdclark@gmail.com> (robclark) - Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) @@ -65,16 +57,19 @@ enum mdp5_intfnum { }; enum mdp5_pipe { - SSPP_VIG0 = 0, - SSPP_VIG1 = 1, - SSPP_VIG2 = 2, - SSPP_RGB0 = 3, - SSPP_RGB1 = 4, - SSPP_RGB2 = 5, - SSPP_DMA0 = 6, - SSPP_DMA1 = 7, - SSPP_VIG3 = 8, - SSPP_RGB3 = 9, + SSPP_NONE = 0, + SSPP_VIG0 = 1, + SSPP_VIG1 = 2, + SSPP_VIG2 = 3, + SSPP_RGB0 = 4, + SSPP_RGB1 = 5, + SSPP_RGB2 = 6, + SSPP_DMA0 = 7, + SSPP_DMA1 = 8, + SSPP_VIG3 = 9, + SSPP_RGB3 = 10, + SSPP_CURSOR0 = 11, + SSPP_CURSOR1 = 12, }; enum mdp5_ctl_mode { @@ -532,6 +527,7 @@ static inline uint32_t MDP5_CTL_LAYER_EXT_REG_CURSOR1(enum mdp_mixer_stage_id va static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) { switch (idx) { + case SSPP_NONE: return (INVALID_IDX(idx)); case SSPP_VIG0: return (mdp5_cfg->pipe_vig.base[0]); case SSPP_VIG1: return (mdp5_cfg->pipe_vig.base[1]); case SSPP_VIG2: return (mdp5_cfg->pipe_vig.base[2]); @@ -542,6 +538,8 @@ static inline uint32_t __offset_PIPE(enum mdp5_pipe idx) case SSPP_DMA1: return (mdp5_cfg->pipe_dma.base[1]); case SSPP_VIG3: return (mdp5_cfg->pipe_vig.base[3]); case SSPP_RGB3: return (mdp5_cfg->pipe_rgb.base[3]); + case SSPP_CURSOR0: return (mdp5_cfg->pipe_cursor.base[0]); + case SSPP_CURSOR1: return (mdp5_cfg->pipe_cursor.base[1]); default: return 
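
The regenerated header renumbers the SSPP enum so that SSPP_NONE occupies value 0 (every real pipe shifts up by one, and the two cursor SSPPs are appended). The payoff shows up in blend_setup() further down, where a zero-initialized stage array now literally reads as "no pipe staged". A small standalone sketch of that convention; only a few enum values are reproduced from the header, and the staging in main() is invented:

#include <stdio.h>

enum mdp5_pipe { SSPP_NONE = 0, SSPP_VIG0 = 1, SSPP_RGB0 = 4, SSPP_CURSOR0 = 11 };

static const char *pipe2name(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0:    return "VIG0";
	case SSPP_RGB0:    return "RGB0";
	case SSPP_CURSOR0: return "CURSOR0";
	default:           return "NONE"; /* SSPP_NONE and anything unknown */
	}
}

int main(void)
{
	/* { SSPP_NONE } zero-fills the rest, so every unset slot is
	 * SSPP_NONE: the array defaults to "nothing staged" */
	enum mdp5_pipe stage[4] = { SSPP_NONE };
	int i;

	stage[1] = SSPP_RGB0;
	for (i = 0; i < 4; i++)
		printf("stage %d: %s\n", i, pipe2name(stage[i]));
	return 0;
}
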
INVALID_IDX(idx); } } @@ -1073,6 +1071,10 @@ static inline uint32_t REG_MDP5_LM_BLEND_COLOR_OUT(uint32_t i0) { return 0x00000 #define MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA 0x00000004 #define MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA 0x00000008 #define MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA 0x00000010 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA 0x00000020 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA 0x00000040 +#define MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA 0x00000080 +#define MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT 0x80000000 static inline uint32_t REG_MDP5_LM_OUT_SIZE(uint32_t i0) { return 0x00000004 + __offset_LM(i0); } #define MDP5_LM_OUT_SIZE_HEIGHT__MASK 0xffff0000 diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index 618b2ffed9b4..34ab553f6897 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c @@ -421,6 +421,16 @@ const struct mdp5_cfg_hw msm8x96_config = { MDP_PIPE_CAP_SW_PIX_EXT | 0, }, + .pipe_cursor = { + .count = 2, + .base = { 0x34000, 0x36000 }, + .caps = MDP_PIPE_CAP_HFLIP | + MDP_PIPE_CAP_VFLIP | + MDP_PIPE_CAP_SW_PIX_EXT | + MDP_PIPE_CAP_CURSOR | + 0, + }, + .lm = { .count = 6, .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 }, diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h index 050e1618c836..b1c7daaede86 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h @@ -32,7 +32,7 @@ extern const struct mdp5_cfg_hw *mdp5_cfg; typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS); #define MDP5_SUB_BLOCK_DEFINITION \ - int count; \ + unsigned int count; \ uint32_t base[MAX_BASES] struct mdp5_sub_block { @@ -85,6 +85,7 @@ struct mdp5_cfg_hw { struct mdp5_pipe_block pipe_vig; struct mdp5_pipe_block pipe_rgb; struct mdp5_pipe_block pipe_dma; + struct mdp5_pipe_block pipe_cursor; struct mdp5_lm_block lm; struct mdp5_sub_block dspp; struct mdp5_sub_block ad; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c index c627ab6d0061..df1c8adec3f3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c @@ -16,16 +16,6 @@ #include "drm_crtc.h" #include "drm_crtc_helper.h" -struct mdp5_cmd_encoder { - struct drm_encoder base; - struct mdp5_interface intf; - bool enabled; - uint32_t bsc; - - struct mdp5_ctl *ctl; -}; -#define to_mdp5_cmd_encoder(x) container_of(x, struct mdp5_cmd_encoder, base) - static struct mdp5_kms *get_kms(struct drm_encoder *encoder) { struct msm_drm_private *priv = encoder->dev->dev_private; @@ -36,47 +26,8 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder) #include <mach/board.h> #include <linux/msm-bus.h> #include <linux/msm-bus-board.h> -#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \ - { \ - .src = MSM_BUS_MASTER_MDP_PORT0, \ - .dst = MSM_BUS_SLAVE_EBI_CH0, \ - .ab = (ab_val), \ - .ib = (ib_val), \ - } - -static struct msm_bus_vectors mdp_bus_vectors[] = { - MDP_BUS_VECTOR_ENTRY(0, 0), - MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000), -}; -static struct msm_bus_paths mdp_bus_usecases[] = { { - .num_paths = 1, - .vectors = &mdp_bus_vectors[0], -}, { - .num_paths = 1, - .vectors = &mdp_bus_vectors[1], -} }; -static struct msm_bus_scale_pdata mdp_bus_scale_table = { - .usecase = mdp_bus_usecases, - .num_usecases = ARRAY_SIZE(mdp_bus_usecases), - .name = "mdss_mdp", -}; - -static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) -{ 
- mdp5_cmd_enc->bsc = msm_bus_scale_register_client( - &mdp_bus_scale_table); - DBG("bus scale client: %08x", mdp5_cmd_enc->bsc); -} - -static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) -{ - if (mdp5_cmd_enc->bsc) { - msm_bus_scale_unregister_client(mdp5_cmd_enc->bsc); - mdp5_cmd_enc->bsc = 0; - } -} -static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) +static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) { if (mdp5_cmd_enc->bsc) { DBG("set bus scaling: %d", idx); @@ -89,14 +40,12 @@ static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) } } #else -static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc) {} -static void bs_fini(struct mdp5_cmd_encoder *mdp5_cmd_enc) {} -static void bs_set(struct mdp5_cmd_encoder *mdp5_cmd_enc, int idx) {} +static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {} #endif #define VSYNC_CLK_RATE 19200000 static int pingpong_tearcheck_setup(struct drm_encoder *encoder, - struct drm_display_mode *mode) + struct drm_display_mode *mode) { struct mdp5_kms *mdp5_kms = get_kms(encoder); struct device *dev = encoder->dev->dev; @@ -176,23 +125,11 @@ static void pingpong_tearcheck_disable(struct drm_encoder *encoder) clk_disable_unprepare(mdp5_kms->vsync_clk); } -static void mdp5_cmd_encoder_destroy(struct drm_encoder *encoder) -{ - struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); - bs_fini(mdp5_cmd_enc); - drm_encoder_cleanup(encoder); - kfree(mdp5_cmd_enc); -} - -static const struct drm_encoder_funcs mdp5_cmd_encoder_funcs = { - .destroy = mdp5_cmd_encoder_destroy, -}; - -static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { - struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); mode = adjusted_mode; @@ -209,9 +146,9 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, mdp5_cmd_enc->ctl); } -static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) +void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) { - struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_interface *intf = &mdp5_cmd_enc->intf; @@ -228,9 +165,9 @@ static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) mdp5_cmd_enc->enabled = false; } -static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) +void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) { - struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder); struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl; struct mdp5_interface *intf = &mdp5_cmd_enc->intf; @@ -248,16 +185,10 @@ static void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) mdp5_cmd_enc->enabled = true; } -static const struct drm_encoder_helper_funcs mdp5_cmd_encoder_helper_funcs = { - .mode_set = mdp5_cmd_encoder_mode_set, - .disable = mdp5_cmd_encoder_disable, - .enable = mdp5_cmd_encoder_enable, -}; - int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder) + struct drm_encoder *slave_encoder) { - struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder); + struct mdp5_encoder *mdp5_cmd_enc = 
to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms; int intf_num; u32 data = 0; @@ -292,43 +223,3 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, return 0; } - -/* initialize command mode encoder */ -struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf, struct mdp5_ctl *ctl) -{ - struct drm_encoder *encoder = NULL; - struct mdp5_cmd_encoder *mdp5_cmd_enc; - int ret; - - if (WARN_ON((intf->type != INTF_DSI) && - (intf->mode != MDP5_INTF_DSI_MODE_COMMAND))) { - ret = -EINVAL; - goto fail; - } - - mdp5_cmd_enc = kzalloc(sizeof(*mdp5_cmd_enc), GFP_KERNEL); - if (!mdp5_cmd_enc) { - ret = -ENOMEM; - goto fail; - } - - memcpy(&mdp5_cmd_enc->intf, intf, sizeof(mdp5_cmd_enc->intf)); - encoder = &mdp5_cmd_enc->base; - mdp5_cmd_enc->ctl = ctl; - - drm_encoder_init(dev, encoder, &mdp5_cmd_encoder_funcs, - DRM_MODE_ENCODER_DSI, NULL); - - drm_encoder_helper_add(encoder, &mdp5_cmd_encoder_helper_funcs); - - bs_init(mdp5_cmd_enc); - - return encoder; - -fail: - if (encoder) - mdp5_cmd_encoder_destroy(encoder); - - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 1ce8a01a5a28..d0c8b38b96ce 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -177,6 +177,21 @@ static void mdp5_crtc_destroy(struct drm_crtc *crtc) kfree(mdp5_crtc); } +static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage) +{ + switch (stage) { + case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA; + case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA; + case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA; + case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA; + case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA; + case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA; + case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA; + default: + return 0; + } +} + /* * blend_setup() - blend all the planes of a CRTC * @@ -195,8 +210,10 @@ static void blend_setup(struct drm_crtc *crtc) uint32_t lm = mdp5_crtc->lm; uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0; unsigned long flags; - uint8_t stage[STAGE_MAX + 1]; + enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE }; int i, plane_cnt = 0; + bool bg_alpha_enabled = false; + u32 mixer_op_mode = 0; #define blender(stage) ((stage) - STAGE0) hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); @@ -218,6 +235,11 @@ static void blend_setup(struct drm_crtc *crtc) if (!pstates[STAGE_BASE]) { ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT; DBG("Border Color is enabled"); + } else if (plane_cnt) { + format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb)); + + if (format->alpha_enable) + bg_alpha_enabled = true; } /* The reset for blending */ @@ -232,6 +254,12 @@ static void blend_setup(struct drm_crtc *crtc) MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST); fg_alpha = pstates[i]->alpha; bg_alpha = 0xFF - pstates[i]->alpha; + + if (!format->alpha_enable && bg_alpha_enabled) + mixer_op_mode = 0; + else + mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i); + DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha); if (format->alpha_enable && pstates[i]->premultiplied) { @@ -268,6 +296,8 @@ static void blend_setup(struct drm_crtc *crtc) blender(i)), bg_alpha); } + mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode); + mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags); out: @@ -370,6 +400,7 @@ 
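
blend_setup() above builds the new LM_BLEND_COLOR_OUT value incrementally: each stage whose format carries alpha contributes its STAGEn_FG_ALPHA bit, while an opaque plane sitting over an alpha-enabled base resets the accumulated mask to zero. Judging by the STAGE1..STAGE6 defines earlier (0x4 through 0x80), the bit for stage n appears to be 1 << (n + 1); a compact model of the accumulation under that assumption:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed from the STAGE1..STAGE6 defines (0x4, 0x8, ... 0x80):
 * the FG_ALPHA bit for stage n is 1 << (n + 1) */
static uint32_t fg_alpha_mask(int stage)
{
	return (stage >= 0 && stage <= 6) ? 1u << (stage + 1) : 0;
}

static uint32_t build_mixer_op_mode(const bool *stage_has_alpha, int cnt,
				    bool bg_alpha_enabled)
{
	uint32_t op_mode = 0;
	int i;

	for (i = 0; i < cnt; i++) {
		/* an opaque stage over an alpha-enabled base hides
		 * everything below it, so the mask is reset */
		if (!stage_has_alpha[i] && bg_alpha_enabled)
			op_mode = 0;
		else
			op_mode |= fg_alpha_mask(i);
	}
	return op_mode;
}

int main(void)
{
	bool alpha[3] = { true, false, true }; /* invented plane stack */

	printf("op_mode = 0x%x\n", build_mixer_op_mode(alpha, 3, true));
	return 0;
}
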
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, struct plane_state pstates[STAGE_MAX + 1]; const struct mdp5_cfg_hw *hw_cfg; const struct drm_plane_state *pstate; + bool cursor_plane = false; int cnt = 0, base = 0, i; DBG("%s: check", crtc->name); @@ -379,6 +410,9 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, pstates[cnt].state = to_mdp5_plane_state(pstate); cnt++; + + if (plane->type == DRM_PLANE_TYPE_CURSOR) + cursor_plane = true; } /* assign a stage based on sorted zpos property */ @@ -390,6 +424,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base)) base++; + /* trigger a warning if cursor isn't the highest zorder */ + WARN_ON(cursor_plane && + (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR)); + /* verify that there are not too many planes attached to crtc * and that we don't have conflicting mixer stages: */ @@ -401,7 +439,10 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc, } for (i = 0; i < cnt; i++) { - pstates[i].state->stage = STAGE_BASE + i + base; + if (cursor_plane && (i == (cnt - 1))) + pstates[i].state->stage = hw_cfg->lm.nb_stages; + else + pstates[i].state->stage = STAGE_BASE + i + base; DBG("%s: assign pipe %s on stage=%d", crtc->name, pstates[i].plane->name, pstates[i].state->stage); @@ -612,6 +653,16 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = { .cursor_move = mdp5_crtc_cursor_move, }; +static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = mdp5_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .set_property = drm_atomic_helper_crtc_set_property, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { .mode_set_nofb = mdp5_crtc_mode_set_nofb, .disable = mdp5_crtc_disable, @@ -727,6 +778,13 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, mdp5_ctl_set_pipeline(ctl, intf, lm); } +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc) +{ + struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); + + return mdp5_crtc->ctl; +} + int mdp5_crtc_get_lm(struct drm_crtc *crtc) { struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); @@ -745,7 +803,8 @@ void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc) /* initialize crtc */ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, - struct drm_plane *plane, int id) + struct drm_plane *plane, + struct drm_plane *cursor_plane, int id) { struct drm_crtc *crtc = NULL; struct mdp5_crtc *mdp5_crtc; @@ -766,8 +825,12 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; mdp5_crtc->err.irq = mdp5_crtc_err_irq; - drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs, - NULL); + if (cursor_plane) + drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane, + &mdp5_crtc_no_lm_cursor_funcs, NULL); + else + drm_crtc_init_with_planes(dev, crtc, plane, NULL, + &mdp5_crtc_funcs, NULL); drm_flip_work_init(&mdp5_crtc->unref_cursor_work, "unref cursor", unref_cursor_worker); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c index d021edc3b307..8b93f7e13200 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c @@ -326,6 +326,8 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, case SSPP_DMA1: return 
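
Two related details meet around this point: atomic_check above pins a cursor plane to the mixer's top blend stage (hw_cfg->lm.nb_stages) regardless of its position in the zpos-sorted list, and mdp_ctl_blend_ext_mask() below routes the cursor SSPPs through CTL_LAYER_EXT, since the classic CTL_LAYER fields only describe stages below STAGE6. A toy version of the stage assignment; the counts here are invented:

#include <stdio.h>

#define STAGE_BASE 0

int main(void)
{
	int nb_stages = 7; /* assumed top stage for this mixer */
	int cnt = 3;       /* planes sorted by zpos, cursor last */
	int base = 1;      /* bumped when stage 0 shows border color */
	int i;

	for (i = 0; i < cnt; i++) {
		int is_cursor = (i == cnt - 1);
		int stage = is_cursor ? nb_stages : STAGE_BASE + i + base;

		printf("plane %d -> stage %d%s\n", i, stage,
		       is_cursor ? " (cursor, forced on top)" : "");
	}
	return 0;
}
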
MDP5_CTL_LAYER_REG_DMA1(stage); case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage); case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage); + case SSPP_CURSOR0: + case SSPP_CURSOR1: default: return 0; } } @@ -333,7 +335,7 @@ static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe, static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, enum mdp_mixer_stage_id stage) { - if (stage < STAGE6) + if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1)) return 0; switch (pipe) { @@ -347,12 +349,14 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe, case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; + case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage); + case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage); default: return 0; } } -int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, - u32 ctl_blend_op_flags) +int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, + u32 ctl_blend_op_flags) { unsigned long flags; u32 blend_cfg = 0, blend_ext_cfg = 0; @@ -365,7 +369,7 @@ int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, start_stage = STAGE_BASE; } - for (i = start_stage; i < start_stage + stage_cnt; i++) { + for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) { blend_cfg |= mdp_ctl_blend_mask(stage[i], i); blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i); } @@ -422,6 +426,8 @@ u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe) case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1; case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3; case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3; + case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0; + case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1; default: return 0; } } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h index 96148c6f863c..fda00d33e4db 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h @@ -56,8 +56,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable); * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask) */ #define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0) -int mdp5_ctl_blend(struct mdp5_ctl *ctl, u8 *stage, u32 stage_cnt, - u32 ctl_blend_op_flags); +int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt, + u32 ctl_blend_op_flags); /** * mdp_ctl_flush_mask...() - Register FLUSH masks diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c index fe0c22230883..80fa482ae8ed 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c @@ -21,17 +21,6 @@ #include "drm_crtc.h" #include "drm_crtc_helper.h" -struct mdp5_encoder { - struct drm_encoder base; - struct mdp5_interface intf; - spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ - bool enabled; - uint32_t bsc; - - struct mdp5_ctl *ctl; -}; -#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) - static struct mdp5_kms *get_kms(struct drm_encoder *encoder) { struct msm_drm_private *priv = encoder->dev->dev_private; @@ -112,9 +101,9 @@ static const struct drm_encoder_funcs mdp5_encoder_funcs = { .destroy = mdp5_encoder_destroy, }; -static void mdp5_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder, + 
struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); @@ -221,7 +210,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder, mdp5_encoder->ctl); } -static void mdp5_encoder_disable(struct drm_encoder *encoder) +static void mdp5_vid_encoder_disable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); @@ -256,7 +245,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder) mdp5_encoder->enabled = false; } -static void mdp5_encoder_enable(struct drm_encoder *encoder) +static void mdp5_vid_encoder_enable(struct drm_encoder *encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_kms *mdp5_kms = get_kms(encoder); @@ -279,6 +268,41 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder) mdp5_encoder->enabled = true; } +static void mdp5_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = &mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode); + else + mdp5_vid_encoder_mode_set(encoder, mode, adjusted_mode); +} + +static void mdp5_encoder_disable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = &mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_disable(encoder); + else + mdp5_vid_encoder_disable(encoder); +} + +static void mdp5_encoder_enable(struct drm_encoder *encoder) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = &mdp5_encoder->intf; + + if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND) + mdp5_cmd_encoder_enable(encoder); + else + mdp5_vid_encoder_enable(encoder); +} + static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = { .mode_set = mdp5_encoder_mode_set, .disable = mdp5_encoder_disable, @@ -303,8 +327,8 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder) return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf)); } -int mdp5_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder) +int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder) { struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); struct mdp5_encoder *mdp5_slave_enc = to_mdp5_encoder(slave_encoder); @@ -342,6 +366,23 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder, return 0; } +void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode) +{ + struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder); + struct mdp5_interface *intf = &mdp5_encoder->intf; + + /* TODO: Expand this to set writeback modes too */ + if (cmd_mode) { + WARN_ON(intf->type != INTF_DSI); + intf->mode = MDP5_INTF_DSI_MODE_COMMAND; + } else { + if (intf->type == INTF_DSI) + intf->mode = MDP5_INTF_DSI_MODE_VIDEO; + else + intf->mode = MDP5_INTF_MODE_NONE; + } +} + /* initialize encoder */ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, struct mdp5_interface *intf, struct mdp5_ctl *ctl) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index c396d459a9d0..3eb0749223d9 100644 ---
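
With the dedicated command-mode encoder gone, a single mdp5_encoder now serves both DSI modes, and each helper callback dispatches on intf->mode at call time; the mode itself is filled in later via mdp5_encoder_set_intf_mode, once the DSI host knows what the panel wants. A stripped-down model of the pattern, with stand-in names:

#include <stdio.h>

enum intf_mode { INTF_MODE_VIDEO, INTF_MODE_COMMAND };

struct encoder {
	enum intf_mode mode; /* set late, after panel probe */
};

static void cmd_enable(struct encoder *e) { printf("cmd-mode enable (%d)\n", e->mode); }
static void vid_enable(struct encoder *e) { printf("video-mode enable (%d)\n", e->mode); }

/* one registered callback; the branch happens per call, not per object */
static void encoder_enable(struct encoder *e)
{
	if (e->mode == INTF_MODE_COMMAND)
		cmd_enable(e);
	else
		vid_enable(e);
}

int main(void)
{
	struct encoder e = { INTF_MODE_VIDEO };

	encoder_enable(&e);          /* video path */
	e.mode = INTF_MODE_COMMAND;
	encoder_enable(&e);          /* command path, same encoder object */
	return 0;
}
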
a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -148,7 +148,15 @@ static int mdp5_set_split_display(struct msm_kms *kms, return mdp5_cmd_encoder_set_split_display(encoder, slave_encoder); else - return mdp5_encoder_set_split_display(encoder, slave_encoder); + return mdp5_vid_encoder_set_split_display(encoder, + slave_encoder); +} + +static void mdp5_set_encoder_mode(struct msm_kms *kms, + struct drm_encoder *encoder, + bool cmd_mode) +{ + mdp5_encoder_set_intf_mode(encoder, cmd_mode); } static void mdp5_kms_destroy(struct msm_kms *kms) @@ -230,6 +238,7 @@ static const struct mdp_kms_funcs kms_funcs = { .get_format = mdp_get_format, .round_pixclk = mdp5_round_pixclk, .set_split_display = mdp5_set_split_display, + .set_encoder_mode = mdp5_set_encoder_mode, .destroy = mdp5_kms_destroy, #ifdef CONFIG_DEBUG_FS .debugfs_init = mdp5_kms_debugfs_init, @@ -267,7 +276,7 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms) static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, enum mdp5_intf_type intf_type, int intf_num, - enum mdp5_intf_mode intf_mode, struct mdp5_ctl *ctl) + struct mdp5_ctl *ctl) { struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; @@ -275,21 +284,15 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms, struct mdp5_interface intf = { .num = intf_num, .type = intf_type, - .mode = intf_mode, + .mode = MDP5_INTF_MODE_NONE, }; - if ((intf_type == INTF_DSI) && - (intf_mode == MDP5_INTF_DSI_MODE_COMMAND)) - encoder = mdp5_cmd_encoder_init(dev, &intf, ctl); - else - encoder = mdp5_encoder_init(dev, &intf, ctl); - + encoder = mdp5_encoder_init(dev, &intf, ctl); if (IS_ERR(encoder)) { dev_err(dev->dev, "failed to construct encoder\n"); return encoder; } - encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; priv->encoders[priv->num_encoders++] = encoder; return encoder; @@ -338,8 +341,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; } - encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, - MDP5_INTF_MODE_NONE, ctl); + encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -357,8 +359,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; } - encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, - MDP5_INTF_MODE_NONE, ctl); + encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl); if (IS_ERR(encoder)) { ret = PTR_ERR(encoder); break; @@ -369,9 +370,6 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) case INTF_DSI: { int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num); - struct drm_encoder *dsi_encs[MSM_DSI_ENCODER_NUM]; - enum mdp5_intf_mode mode; - int i; if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) { dev_err(dev->dev, "failed to find dsi from intf %d\n", @@ -389,19 +387,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num) break; } - for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) { - mode = (i == MSM_DSI_CMD_ENCODER_ID) ? 
- MDP5_INTF_DSI_MODE_COMMAND : - MDP5_INTF_DSI_MODE_VIDEO; - dsi_encs[i] = construct_encoder(mdp5_kms, INTF_DSI, - intf_num, mode, ctl); - if (IS_ERR(dsi_encs[i])) { - ret = PTR_ERR(dsi_encs[i]); - break; - } + encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl); + if (IS_ERR(encoder)) { + ret = PTR_ERR(encoder); + break; } - ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, dsi_encs); + ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder); break; } default: @@ -418,20 +410,48 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) struct drm_device *dev = mdp5_kms->dev; struct msm_drm_private *priv = dev->dev_private; const struct mdp5_cfg_hw *hw_cfg; - int i, ret; + unsigned int num_crtcs; + int i, ret, pi = 0, ci = 0; + struct drm_plane *primary[MAX_BASES] = { NULL }; + struct drm_plane *cursor[MAX_BASES] = { NULL }; hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); - /* Construct planes equaling the number of hw pipes, and CRTCs - * for the N layer-mixers (LM). The first N planes become primary + /* + * Construct encoders and modeset initialize connector devices + * for each external display interface. + */ + for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { + ret = modeset_init_intf(mdp5_kms, i); + if (ret) + goto fail; + } + + /* + * We should ideally have fewer encoders (set up by parsing the MDP5 + * interfaces) than layer mixers present in HW, but let's be safe here + * anyway + */ + num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count); + + /* + * Construct planes equaling the number of hw pipes, and CRTCs for the + * N encoders set up by the driver. The first N planes become primary + * planes for the CRTCs, with the remainder as overlay planes: */ for (i = 0; i < mdp5_kms->num_hwpipes; i++) { - bool primary = i < mdp5_cfg->lm.count; + struct mdp5_hw_pipe *hwpipe = mdp5_kms->hwpipes[i]; struct drm_plane *plane; - struct drm_crtc *crtc; + enum drm_plane_type type; - plane = mdp5_plane_init(dev, primary); + if (i < num_crtcs) + type = DRM_PLANE_TYPE_PRIMARY; + else if (hwpipe->caps & MDP_PIPE_CAP_CURSOR) + type = DRM_PLANE_TYPE_CURSOR; + else + type = DRM_PLANE_TYPE_OVERLAY; + + plane = mdp5_plane_init(dev, type); if (IS_ERR(plane)) { ret = PTR_ERR(plane); dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret); @@ -439,10 +459,16 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) } priv->planes[priv->num_planes++] = plane; - if (!primary) - continue; + if (type == DRM_PLANE_TYPE_PRIMARY) + primary[pi++] = plane; + if (type == DRM_PLANE_TYPE_CURSOR) + cursor[ci++] = plane; + } + + for (i = 0; i < num_crtcs; i++) { + struct drm_crtc *crtc; - crtc = mdp5_crtc_init(dev, plane, i); + crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i); if (IS_ERR(crtc)) { ret = PTR_ERR(crtc); dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret); @@ -451,13 +477,14 @@ static int modeset_init(struct mdp5_kms *mdp5_kms) priv->crtcs[priv->num_crtcs++] = crtc; } - /* Construct encoders and modeset initialize connector devices - * for each external display interface.
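
The reordered modeset_init() above first probes the interfaces, then sizes the CRTC set as min(num_encoders, lm.count), and finally types each hardware pipe: the first num_crtcs pipes become primary planes, cursor-capable pipes become cursor planes, and the rest are overlays. A self-contained sketch of that classification, using the MDP_PIPE_CAP_CURSOR bit added in mdp_kms.h further down; the pipe layout in main() is hypothetical:

#include <stdio.h>

#define MDP_PIPE_CAP_CURSOR (1 << 6) /* BIT(6), from mdp_kms.h below */

enum plane_type { PRIMARY, OVERLAY, CURSOR };

static enum plane_type classify(int i, int num_crtcs, unsigned caps)
{
	/* mirrors the loop above: primaries first, then cursor-capable
	 * pipes become cursor planes, everything else is an overlay */
	if (i < num_crtcs)
		return PRIMARY;
	if (caps & MDP_PIPE_CAP_CURSOR)
		return CURSOR;
	return OVERLAY;
}

int main(void)
{
	/* hypothetical layout: 8 pipes, the last two cursor-capable */
	unsigned caps[8] = { 0, 0, 0, 0, 0, 0,
			     MDP_PIPE_CAP_CURSOR, MDP_PIPE_CAP_CURSOR };
	const char *names[] = { "primary", "overlay", "cursor" };
	int num_crtcs = 4;
	int i;

	for (i = 0; i < 8; i++)
		printf("pipe %d: %s\n", i, names[classify(i, num_crtcs, caps[i])]);
	return 0;
}
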
+ /* + * Now that we know the number of crtcs we've created, set the possible + * crtcs for the encoders */ - for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) { - ret = modeset_init_intf(mdp5_kms, i); - if (ret) - goto fail; + for (i = 0; i < priv->num_encoders; i++) { + struct drm_encoder *encoder = priv->encoders[i]; + + encoder->possible_crtcs = (1 << priv->num_crtcs) - 1; } return 0; @@ -773,6 +800,9 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms) static const enum mdp5_pipe dma_planes[] = { SSPP_DMA0, SSPP_DMA1, }; + static const enum mdp5_pipe cursor_planes[] = { + SSPP_CURSOR0, SSPP_CURSOR1, + }; const struct mdp5_cfg_hw *hw_cfg; int ret; @@ -796,6 +826,13 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms) if (ret) return ret; + /* Construct cursor pipes: */ + ret = construct_pipes(mdp5_kms, hw_cfg->pipe_cursor.count, + cursor_planes, hw_cfg->pipe_cursor.base, + hw_cfg->pipe_cursor.caps); + if (ret) + return ret; + return 0; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index cdfc63d90c7b..9de471191eba 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -126,6 +126,17 @@ struct mdp5_interface { enum mdp5_intf_mode mode; }; +struct mdp5_encoder { + struct drm_encoder base; + struct mdp5_interface intf; + spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */ + bool enabled; + uint32_t bsc; + + struct mdp5_ctl *ctl; +}; +#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base) + static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) { msm_writel(data, mdp5_kms->mmio + reg); @@ -156,6 +167,7 @@ static inline const char *pipe2name(enum mdp5_pipe pipe) NAME(RGB0), NAME(RGB1), NAME(RGB2), NAME(DMA0), NAME(DMA1), NAME(VIG3), NAME(RGB3), + NAME(CURSOR0), NAME(CURSOR1), #undef NAME }; return names[pipe]; @@ -231,8 +243,10 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms); uint32_t mdp5_plane_get_flush(struct drm_plane *plane); enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); -struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary); +struct drm_plane *mdp5_plane_init(struct drm_device *dev, + enum drm_plane_type type); +struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); int mdp5_crtc_get_lm(struct drm_crtc *crtc); @@ -240,25 +254,36 @@ void mdp5_crtc_set_pipeline(struct drm_crtc *crtc, struct mdp5_interface *intf, struct mdp5_ctl *ctl); void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc); struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, - struct drm_plane *plane, int id); + struct drm_plane *plane, + struct drm_plane *cursor_plane, int id); struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, struct mdp5_interface *intf, struct mdp5_ctl *ctl); -int mdp5_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder); +int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder, + struct drm_encoder *slave_encoder); +void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode); int mdp5_encoder_get_linecount(struct drm_encoder *encoder); u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder); #ifdef CONFIG_DRM_MSM_DSI -struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf, struct mdp5_ctl *ctl); +void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +void 
mdp5_cmd_encoder_disable(struct drm_encoder *encoder); +void mdp5_cmd_encoder_enable(struct drm_encoder *encoder); int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder, - struct drm_encoder *slave_encoder); + struct drm_encoder *slave_encoder); #else -static inline struct drm_encoder *mdp5_cmd_encoder_init(struct drm_device *dev, - struct mdp5_interface *intf, struct mdp5_ctl *ctl) +static inline void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} +static inline void mdp5_cmd_encoder_disable(struct drm_encoder *encoder) +{ +} +static inline void mdp5_cmd_encoder_enable(struct drm_encoder *encoder) { - return ERR_PTR(-EINVAL); } static inline int mdp5_cmd_encoder_set_split_display( struct drm_encoder *encoder, struct drm_encoder *slave_encoder) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c index 1ae9dc8d260d..35c4dabb0c0c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c @@ -53,6 +53,14 @@ struct mdp5_hw_pipe *mdp5_pipe_assign(struct drm_atomic_state *s, if (caps & ~cur->caps) continue; + /* + * don't assign a cursor pipe to a plane that isn't going to + * be used as a cursor + */ + if (cur->caps & MDP_PIPE_CAP_CURSOR && + plane->type != DRM_PLANE_TYPE_CURSOR) + continue; + /* possible candidate, take the one with the * fewest unneeded caps bits set: */ diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index b9fb111d3428..0ffb8affef35 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -29,6 +29,11 @@ struct mdp5_plane { static int mdp5_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, + struct drm_rect *src, struct drm_rect *dest); + +static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t src_x, uint32_t src_y, @@ -45,7 +50,7 @@ static struct mdp5_kms *get_kms(struct drm_plane *plane) static bool plane_enabled(struct drm_plane_state *state) { - return state->fb && state->crtc; + return state->visible; } static void mdp5_plane_destroy(struct drm_plane *plane) @@ -246,6 +251,19 @@ static const struct drm_plane_funcs mdp5_plane_funcs = { .atomic_print_state = mdp5_plane_atomic_print_state, }; +static const struct drm_plane_funcs mdp5_cursor_plane_funcs = { + .update_plane = mdp5_update_cursor_plane_legacy, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = mdp5_plane_destroy, + .set_property = drm_atomic_helper_plane_set_property, + .atomic_set_property = mdp5_plane_atomic_set_property, + .atomic_get_property = mdp5_plane_atomic_get_property, + .reset = mdp5_plane_reset, + .atomic_duplicate_state = mdp5_plane_duplicate_state, + .atomic_destroy_state = mdp5_plane_destroy_state, + .atomic_print_state = mdp5_plane_atomic_print_state, +}; + static int mdp5_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { @@ -272,15 +290,20 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, msm_framebuffer_cleanup(fb, mdp5_kms->id); } -static int mdp5_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) +#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) +static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, + struct 
drm_plane_state *state) { struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state); + struct drm_plane *plane = state->plane; struct drm_plane_state *old_state = plane->state; struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg); bool new_hwpipe = false; uint32_t max_width, max_height; uint32_t caps = 0; + struct drm_rect clip; + int min_scale, max_scale; + int ret; DBG("%s: check (%d -> %d)", plane->name, plane_enabled(old_state), plane_enabled(state)); @@ -296,6 +319,18 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, return -ERANGE; } + clip.x1 = 0; + clip.y1 = 0; + clip.x2 = crtc_state->adjusted_mode.hdisplay; + clip.y2 = crtc_state->adjusted_mode.vdisplay; + min_scale = FRAC_16_16(1, 8); + max_scale = FRAC_16_16(8, 1); + + ret = drm_plane_helper_check_state(state, &clip, min_scale, + max_scale, true, true); + if (ret) + return ret; + if (plane_enabled(state)) { unsigned int rotation; const struct mdp_format *format; @@ -321,6 +356,9 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, if (rotation & DRM_REFLECT_Y) caps |= MDP_PIPE_CAP_VFLIP; + if (plane->type == DRM_PLANE_TYPE_CURSOR) + caps |= MDP_PIPE_CAP_CURSOR; + /* (re)allocate hw pipe if we don't have one or caps-mismatch: */ if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps)) new_hwpipe = true; @@ -356,6 +394,23 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane, return 0; } +static int mdp5_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + + crtc = state->crtc ? state->crtc : plane->state->crtc; + if (!crtc) + return 0; + + crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + if (WARN_ON(!crtc_state)) + return -EINVAL; + + return mdp5_plane_atomic_check_with_state(crtc_state, state); +} + static void mdp5_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { @@ -368,10 +423,7 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane, ret = mdp5_plane_mode_set(plane, state->crtc, state->fb, - state->crtc_x, state->crtc_y, - state->crtc_w, state->crtc_h, - state->src_x, state->src_y, - state->src_w, state->src_h); + &state->src, &state->dst); /* atomic_check should have ensured that this doesn't fail */ WARN_ON(ret < 0); } @@ -664,10 +716,7 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe, static int mdp5_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - uint32_t src_x, uint32_t src_y, - uint32_t src_w, uint32_t src_h) + struct drm_rect *src, struct drm_rect *dest) { struct drm_plane_state *pstate = plane->state; struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe; @@ -683,6 +732,10 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, uint32_t pix_format; unsigned int rotation; bool vflip, hflip; + int crtc_x, crtc_y; + unsigned int crtc_w, crtc_h; + uint32_t src_x, src_y; + uint32_t src_w, src_h; unsigned long flags; int ret; @@ -695,6 +748,16 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, format = to_mdp_format(msm_framebuffer_format(fb)); pix_format = format->base.pixel_format; + src_x = src->x1; + src_y = src->y1; + src_w = drm_rect_width(src); + src_h = drm_rect_height(src); + + crtc_x = dest->x1; + crtc_y = dest->y1; + crtc_w = drm_rect_width(dest); + crtc_h = drm_rect_height(dest); + /* src values are in Q16 fixed point, convert to 
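
FRAC_16_16 above expresses the plane scale limits in the Q16.16 fixed-point format that drm_plane_helper_check_state() expects, here bounding scaling between 1/8x and 8x; source rectangle coordinates travel in the same format and are truncated to integers with a 16-bit shift, as the following hunk does. A quick demonstration of the encoding:

#include <stdint.h>
#include <stdio.h>

#define FRAC_16_16(mult, div) (((mult) << 16) / (div))

int main(void)
{
	int min_scale = FRAC_16_16(1, 8); /* 0x2000  = 1/8x */
	int max_scale = FRAC_16_16(8, 1); /* 0x80000 = 8x   */

	/* src coordinates are Q16.16; the integer pixel position is
	 * recovered by shifting the fractional bits away */
	uint32_t src_x = (640u << 16) | 0x8000; /* 640.5 in Q16.16 */

	printf("min=0x%x max=0x%x src_x=%u\n", min_scale, max_scale, src_x >> 16);
	return 0;
}
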
integer: */ src_x = src_x >> 16; src_y = src_y >> 16; @@ -818,12 +881,88 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, return ret; } +static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane, + struct drm_crtc *crtc, struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_plane_state *plane_state, *new_plane_state; + struct mdp5_plane_state *mdp5_pstate; + struct drm_crtc_state *crtc_state = crtc->state; + int ret; + + if (!crtc_state->active || drm_atomic_crtc_needs_modeset(crtc_state)) + goto slow; + + plane_state = plane->state; + mdp5_pstate = to_mdp5_plane_state(plane_state); + + /* don't use fast path if we don't have a hwpipe allocated yet */ + if (!mdp5_pstate->hwpipe) + goto slow; + + /* only allow changing of position(crtc x/y or src x/y) in fast path */ + if (plane_state->crtc != crtc || + plane_state->src_w != src_w || + plane_state->src_h != src_h || + plane_state->crtc_w != crtc_w || + plane_state->crtc_h != crtc_h || + !plane_state->fb || + plane_state->fb != fb) + goto slow; + + new_plane_state = mdp5_plane_duplicate_state(plane); + if (!new_plane_state) + return -ENOMEM; + + new_plane_state->src_x = src_x; + new_plane_state->src_y = src_y; + new_plane_state->src_w = src_w; + new_plane_state->src_h = src_h; + new_plane_state->crtc_x = crtc_x; + new_plane_state->crtc_y = crtc_y; + new_plane_state->crtc_w = crtc_w; + new_plane_state->crtc_h = crtc_h; + + ret = mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state); + if (ret) + goto slow_free; + + if (new_plane_state->visible) { + struct mdp5_ctl *ctl; + + ret = mdp5_plane_mode_set(plane, crtc, fb, + &new_plane_state->src, + &new_plane_state->dst); + WARN_ON(ret < 0); + + ctl = mdp5_crtc_get_ctl(crtc); + + mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane)); + } + + *to_mdp5_plane_state(plane_state) = + *to_mdp5_plane_state(new_plane_state); + + mdp5_plane_destroy_state(plane, new_plane_state); + + return 0; +slow_free: + mdp5_plane_destroy_state(plane, new_plane_state); +slow: + return drm_atomic_helper_update_plane(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h); +} + enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) { struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state); if (WARN_ON(!pstate->hwpipe)) - return 0; + return SSPP_NONE; return pstate->hwpipe->pipe; } @@ -839,12 +978,12 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane) } /* initialize plane */ -struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) +struct drm_plane *mdp5_plane_init(struct drm_device *dev, + enum drm_plane_type type) { struct drm_plane *plane = NULL; struct mdp5_plane *mdp5_plane; int ret; - enum drm_plane_type type; mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL); if (!mdp5_plane) { @@ -857,10 +996,16 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary) mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats, ARRAY_SIZE(mdp5_plane->formats), false); - type = primary ? 
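
The legacy cursor hook above is a fast path: when only the cursor's position changes, it duplicates the plane state, patches the coordinates, re-runs the atomic check against the current CRTC state, then programs the pipe and flushes through the CRTC's CTL, all without a full atomic commit; any other change falls back to drm_atomic_helper_update_plane(). The gating condition, condensed into a standalone predicate with shortened field names:

#include <stdbool.h>
#include <stdio.h>

struct pstate {
	int crtc_id, fb_id;
	int src_w, src_h, crtc_w, crtc_h;
	bool has_hwpipe;
};

/* true only for a pure position change on an already-configured cursor */
static bool can_fast_path(const struct pstate *cur, const struct pstate *req,
			  bool crtc_active, bool needs_modeset)
{
	if (!crtc_active || needs_modeset || !cur->has_hwpipe)
		return false;
	return cur->crtc_id == req->crtc_id &&
	       cur->fb_id && cur->fb_id == req->fb_id &&
	       cur->src_w == req->src_w && cur->src_h == req->src_h &&
	       cur->crtc_w == req->crtc_w && cur->crtc_h == req->crtc_h;
}

int main(void)
{
	struct pstate cur = { 1, 42, 64, 64, 64, 64, true };
	struct pstate move = cur;   /* same size and fb, new x/y only */
	struct pstate resize = cur;

	resize.crtc_w = 32;         /* size change -> slow path */

	printf("move:   %s\n", can_fast_path(&cur, &move, true, false) ? "fast" : "slow");
	printf("resize: %s\n", can_fast_path(&cur, &resize, true, false) ? "fast" : "slow");
	return 0;
}
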
DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; - ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, - mdp5_plane->formats, mdp5_plane->nformats, - type, NULL); + if (type == DRM_PLANE_TYPE_CURSOR) + ret = drm_universal_plane_init(dev, plane, 0xff, + &mdp5_cursor_plane_funcs, + mdp5_plane->formats, mdp5_plane->nformats, + type, NULL); + else + ret = drm_universal_plane_init(dev, plane, 0xff, + &mdp5_plane_funcs, + mdp5_plane->formats, mdp5_plane->nformats, + type, NULL); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h index 303130320748..7574cdfef418 100644 --- a/drivers/gpu/drm/msm/mdp/mdp_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h @@ -112,6 +112,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format); #define MDP_PIPE_CAP_CSC BIT(3) #define MDP_PIPE_CAP_DECIMATION BIT(4) #define MDP_PIPE_CAP_SW_PIX_EXT BIT(5) +#define MDP_PIPE_CAP_CURSOR BIT(6) static inline bool pipe_supports_yuv(uint32_t pipe_caps) { diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c index 30b5d23e53b4..9633a68b14d7 100644 --- a/drivers/gpu/drm/msm/msm_atomic.c +++ b/drivers/gpu/drm/msm/msm_atomic.c @@ -93,11 +93,6 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev, if (!crtc->state->enable) continue; - /* Legacy cursor ioctls are completely unsynced, and userspace - * relies on that (by doing tons of cursor updates). */ - if (old_state->legacy_cursor_update) - continue; - kms->funcs->wait_for_crtc_commit_done(kms, crtc); } } @@ -151,20 +146,29 @@ static void commit_worker(struct work_struct *work) complete_commit(container_of(work, struct msm_commit, work), true); } +/* + * this func is identical to drm_atomic_helper_check, but we keep it + * because we might eventually need a more fine-grained check sequence + * without using the atomic helpers. + * + * In the past, we first called drm_atomic_helper_check_planes, and then + * drm_atomic_helper_check_modeset. We needed this because the MDP5 plane's + * ->atomic_check could update ->mode_changed for pixel format changes. + * This, however, isn't needed now because if there is a pixel format change, + * we just assign a new hwpipe for it with a new SMP allocation. We might + * eventually hit a condition where we would need to do a full modeset if + * we run out of planes. There, we'd probably need to set mode_changed. + */ int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { int ret; - /* - * msm ->atomic_check can update ->mode_changed for pixel format - * changes, hence must be run before we check the modeset changes.
- */ - ret = drm_atomic_helper_check_planes(dev, state); + ret = drm_atomic_helper_check_modeset(dev, state); if (ret) return ret; - ret = drm_atomic_helper_check_modeset(dev, state); + ret = drm_atomic_helper_check_planes(dev, state); if (ret) return ret; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 54207fe59307..cb47f4a14215 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -91,6 +91,25 @@ module_param(dumpstate, bool, 0600); * Util/helpers: */ +struct clk *msm_clk_get(struct platform_device *pdev, const char *name) +{ + struct clk *clk; + char name2[32]; + + clk = devm_clk_get(&pdev->dev, name); + if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) + return clk; + + snprintf(name2, sizeof(name2), "%s_clk", name); + + clk = devm_clk_get(&pdev->dev, name2); + if (!IS_ERR(clk)) + dev_warn(&pdev->dev, "Using legacy clk name binding. Use " + "\"%s\" instead of \"%s\"\n", name, name2); + + return clk; +} + void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, const char *dbgname) { @@ -984,6 +1003,7 @@ static int add_display_components(struct device *dev, * as components. */ static const struct of_device_id msm_gpu_match[] = { + { .compatible = "qcom,adreno" }, { .compatible = "qcom,adreno-3xx" }, { .compatible = "qcom,kgsl-3d0" }, { }, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index ed4dad3ca133..cdd7b2f8e977 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -275,16 +275,11 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, struct drm_encoder *encoder); struct msm_dsi; -enum msm_dsi_encoder_id { - MSM_DSI_VIDEO_ENCODER_ID = 0, - MSM_DSI_CMD_ENCODER_ID = 1, - MSM_DSI_ENCODER_NUM = 2 -}; #ifdef CONFIG_DRM_MSM_DSI void __init msm_dsi_register(void); void __exit msm_dsi_unregister(void); int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, - struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]); + struct drm_encoder *encoder); #else static inline void __init msm_dsi_register(void) { @@ -293,8 +288,8 @@ static inline void __exit msm_dsi_unregister(void) { } static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, - struct drm_device *dev, - struct drm_encoder *encoders[MSM_DSI_ENCODER_NUM]) + struct drm_device *dev, + struct drm_encoder *encoder) { return -EINVAL; } @@ -318,6 +313,7 @@ static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; } static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {} #endif +struct clk *msm_clk_get(struct platform_device *pdev, const char *name); void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, const char *dbgname); void msm_writel(u32 data, void __iomem *addr); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 489676568a10..1172fe7a9252 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -95,13 +95,13 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, */ submit->bos[i].flags = 0; - ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo)); - if (unlikely(ret)) { + if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) { pagefault_enable(); spin_unlock(&file->table_lock); - ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); - if (ret) + if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) { + ret = -EFAULT; goto out; + } spin_lock(&file->table_lock); 
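
The msm_gem_submit.c hunks on either side of this point fix a classic pitfall: copy_from_user() and copy_from_user_inatomic() return the number of bytes left uncopied, not a negative errno, so storing that count in ret and returning it leaked a positive value to userspace. The corrected pattern tests for a nonzero remainder and returns -EFAULT explicitly; a userspace model with a stand-in copy routine:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in with the kernel convention: returns bytes NOT copied */
static unsigned long fake_copy_from_user(void *dst, const void *src,
					 unsigned long n)
{
	if (!src)
		return n; /* total fault: nothing copied */
	memcpy(dst, src, n);
	return 0;
}

static int lookup(const void *userptr)
{
	char buf[16];

	/* wrong: returning the leftover-byte count as if it were an errno;
	 * right: map any shortfall to -EFAULT, as the fix above does */
	if (fake_copy_from_user(buf, userptr, sizeof(buf)))
		return -EFAULT;
	return 0;
}

int main(void)
{
	printf("%d %d\n", lookup("0123456789abcdef"), lookup(NULL));
	return 0;
}
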
pagefault_disable(); } @@ -317,9 +317,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob uint64_t iova; bool valid; - ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc)); - if (ret) + if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) { + ret = -EFAULT; goto out; + } if (submit_reloc.submit_offset % 4) { DRM_ERROR("non-aligned reloc offset: %u\n", diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index b28527a65d09..99e05aacbee1 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -560,8 +560,7 @@ static irqreturn_t irq_handler(int irq, void *data) } static const char *clk_names[] = { - "core_clk", "iface_clk", "rbbmtimer_clk", "mem_clk", - "mem_iface_clk", "alt_mem_iface_clk", + "core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface", }; int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, @@ -625,13 +624,13 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, /* Acquire clocks: */ for (i = 0; i < ARRAY_SIZE(clk_names); i++) { - gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]); + gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]); DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]); if (IS_ERR(gpu->grp_clks[i])) gpu->grp_clks[i] = NULL; } - gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk"); + gpu->ebi1_clk = msm_clk_get(pdev, "bus"); DBG("ebi1_clk: %p", gpu->ebi1_clk); if (IS_ERR(gpu->ebi1_clk)) gpu->ebi1_clk = NULL; diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 61aaaa1de6eb..7f5779daf5c8 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -24,9 +24,12 @@ struct msm_iommu { }; #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) -static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev, +static int msm_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *arg) { + struct msm_iommu *iommu = arg; + if (iommu->base.handler) + return iommu->base.handler(iommu->base.arg, iova, flags); pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); return 0; } @@ -136,7 +139,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) iommu->domain = domain; msm_mmu_init(&iommu->base, dev, &funcs); - iommu_set_fault_handler(domain, msm_fault_handler, dev); + iommu_set_fault_handler(domain, msm_fault_handler, iommu); return &iommu->base; } diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index e470f4cf8f76..117635d2b8c5 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -56,6 +56,9 @@ struct msm_kms_funcs { struct drm_encoder *encoder, struct drm_encoder *slave_encoder, bool is_cmd_mode); + void (*set_encoder_mode)(struct msm_kms *kms, + struct drm_encoder *encoder, + bool cmd_mode); /* cleanup: */ void (*destroy)(struct msm_kms *kms); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index f85c879e68d2..aa2c5d4580c8 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -33,6 +33,8 @@ struct msm_mmu_funcs { struct msm_mmu { const struct msm_mmu_funcs *funcs; struct device *dev; + int (*handler)(void *arg, unsigned long iova, int flags); + void *arg; }; static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, @@ -45,4 +47,11 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device 
*dev, struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); +static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, + int (*handler)(void *arg, unsigned long iova, int flags)) +{ + mmu->arg = arg; + mmu->handler = handler; +} + #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c index a555681c3096..90075b676256 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/arb.c +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c @@ -198,7 +198,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, int *burst, int *lwm) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; struct nv_fifo_info fifo_data; struct nv_sim_state sim_data; int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); @@ -227,7 +227,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); } - if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT) nv04_calc_arb(&fifo_data, &sim_data); else nv10_calc_arb(&fifo_data, &sim_data); @@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm { struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN) nv04_update_arb(dev, vclk, bpp, burst, lwm); else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index a72754d73c84..ab7b69c11d40 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -113,8 +113,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod { struct drm_device *dev = crtc->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_bios *bios = nvxx_bios(&drm->device); - struct nvkm_clk *clk = nvxx_clk(&drm->device); + struct nvkm_bios *bios = nvxx_bios(&drm->client.device); + struct nvkm_clk *clk = nvxx_clk(&drm->client.device); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nv04_mode_state *state = &nv04_display(dev)->mode_reg; struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index]; @@ -138,7 +138,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod * has yet been observed in allowing the use a single stage pll on all * nv43 however. 
the behaviour of single stage use is untested on nv40 */ - if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) + if (drm->client.device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2)) memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2)); @@ -148,10 +148,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK; /* The blob uses this always, so let's do the same */ - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE; /* again nv40 and some nv43 act more like nv3x as described above */ - if (drm->device.info.chipset < 0x41) + if (drm->client.device.info.chipset < 0x41) state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL | NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL; state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; @@ -270,7 +270,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode) horizEnd = horizTotal - 2; horizBlankEnd = horizTotal + 4; #if 0 - if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) + if (dev->overlayAdaptor && drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) /* This reportedly works around some video overlay bandwidth problems */ horizTotal += 2; #endif @@ -505,7 +505,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 | NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 | NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM; - if (drm->device.info.chipset >= 0x11) + if (drm->client.device.info.chipset >= 0x11) regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE; @@ -546,26 +546,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) * 1 << 30 on 0x60.830), for no apparent reason */ regp->CRTC[NV_CIO_CRE_59] = off_chip_digital; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1; regp->crtc_830 = mode->crtc_vdisplay - 3; regp->crtc_834 = mode->crtc_vdisplay - 1; - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) /* This is what the blob does */ regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC; else regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC; /* Some misc regs */ - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { regp->CRTC[NV_CIO_CRE_85] = 0xFF; regp->CRTC[NV_CIO_CRE_86] = 0x1; } @@ -577,7 +577,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) /* Generic PRAMDAC regs */ - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) /* Only bit that bios and blob set. 
*/ regp->nv10_cursync = (1 << 25); @@ -586,7 +586,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; if (fb->format->depth == 16) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; - if (drm->device.info.chipset >= 0x11) + if (drm->client.device.info.chipset >= 0x11) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */ @@ -649,7 +649,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, nv_crtc_mode_set_vga(crtc, adjusted_mode); /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */ - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk); nv_crtc_mode_set_regs(crtc, adjusted_mode); nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock); @@ -710,7 +710,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc) /* Some more preparation. */ NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA); - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900); NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000); } @@ -886,7 +886,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX); crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) { regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8; crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47); } @@ -967,7 +967,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src, { struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->device.info.chipset == 0x11) { + if (drm->client.device.info.chipset == 0x11) { pixel = ((pixel & 0x000000ff) << 24) | ((pixel & 0x0000ff00) << 8) | ((pixel & 0x00ff0000) >> 8) | @@ -1008,7 +1008,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, if (ret) goto out; - if (drm->device.info.chipset >= 0x11) + if (drm->client.device.info.chipset >= 0x11) nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); else nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo); @@ -1124,8 +1124,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num) drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs); drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256); - ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, - 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo); + ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100, + TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL, + &nv_crtc->cursor.nvbo); if (!ret) { ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false); if (!ret) { diff --git a/drivers/gpu/drm/nouveau/dispnv04/cursor.c b/drivers/gpu/drm/nouveau/dispnv04/cursor.c index c83116a308a4..f26e44ea7389 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/cursor.c +++ b/drivers/gpu/drm/nouveau/dispnv04/cursor.c @@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); - if 
(drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) nv_fix_nv40_hw_cursor(dev, nv_crtc->index); } diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index b6cc7766e6f7..4feab0a5419d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c @@ -66,7 +66,7 @@ int nv04_dac_output_offset(struct drm_encoder *encoder) static int sample_load_twice(struct drm_device *dev, bool sense[2]) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; int i; for (i = 0; i < 2; i++) { @@ -80,19 +80,19 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2]) * use a 10ms timeout (guards against crtc being inactive, in * which case blank state would never change) */ - if (nvif_msec(&drm->device, 10, + if (nvif_msec(&drm->client.device, 10, if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) break; ) < 0) return -EBUSY; - if (nvif_msec(&drm->device, 10, + if (nvif_msec(&drm->client.device, 10, if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) break; ) < 0) return -EBUSY; - if (nvif_msec(&drm->device, 10, + if (nvif_msec(&drm->client.device, 10, if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1)) break; ) < 0) @@ -133,7 +133,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; struct nouveau_drm *drm = nouveau_drm(dev); uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode; uint8_t saved_palette0[3], saved_palette_mask; @@ -236,8 +236,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_object *device = &nouveau_drm(dev)->device.object; - struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; + struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, @@ -288,7 +288,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ routput = (saved_routput & 0xfffffece) | head << 8; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE) { if (dcb->type == DCB_OUTPUT_TV) routput |= 0x1a << 16; else @@ -403,7 +403,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder, } /* This could use refinement for flatpanels, but it should work this way */ - if (drm->device.info.chipset < 0x44) + if (drm->client.device.info.chipset < 0x44) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); else NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index 2e5bb2afda7c..9805d2cdc1a1 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder 
*encoder, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; @@ -417,7 +417,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || (nv_connector->dithering_mode == DITHERING_MODE_AUTO && fb->format->depth > connector->display_info.bpc * 3)) { - if (drm->device.info.chipset == 0x11) + if (drm->client.device.info.chipset == 0x11) regp->dither = savep->dither | 0x00010000; else { int i; @@ -428,7 +428,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, } } } else { - if (drm->device.info.chipset != 0x11) { + if (drm->client.device.info.chipset != 0x11) { /* reset them */ int i; for (i = 0; i < 3; i++) { @@ -464,7 +464,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); /* This could use refinement for flatpanels, but it should work this way */ - if (drm->device.info.chipset < 0x44) + if (drm->client.device.info.chipset < 0x44) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); else NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); @@ -486,7 +486,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) { #ifdef __powerpc__ struct drm_device *dev = encoder->dev; - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; /* BIOS scripts usually take care of the backlight, thanks * Apple for your consistency. 
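Editor's note: nearly every nouveau hunk in this section is the same mechanical rename. The nvif_device that used to sit directly in struct nouveau_drm now hangs off the client object, so drm->device becomes drm->client.device wherever the info fields, the nvif object, or the nvxx_*() accessors are reached. A minimal sketch of the new access path follows; nouveau_nvif_device is a hypothetical helper written only to illustrate the change, the patch itself simply renames the path at every call site.

static inline struct nvif_device *
nouveau_nvif_device(struct drm_device *dev)
{
	/* was: return &nouveau_drm(dev)->device; */
	return &nouveau_drm(dev)->client.device;
}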
@@ -624,7 +624,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder) struct drm_device *dev = encoder->dev; struct dcb_output *dcb = nouveau_encoder(encoder)->dcb; struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); + struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI); struct nvkm_i2c_bus_probe info[] = { { diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index 34c0f2f67548..5b9d549aa791 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -35,7 +35,7 @@ int nv04_display_create(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); + struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); struct dcb_table *dcb = &drm->vbios.dcb; struct drm_connector *connector, *ct; struct drm_encoder *encoder; @@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev) if (!disp) return -ENOMEM; - nvif_object_map(&drm->device.object); + nvif_object_map(&drm->client.device.object); nouveau_display(dev)->priv = disp; nouveau_display(dev)->dtor = nv04_display_destroy; @@ -139,7 +139,7 @@ nv04_display_destroy(struct drm_device *dev) nouveau_display(dev)->priv = NULL; kfree(disp); - nvif_object_unmap(&drm->device.object); + nvif_object_unmap(&drm->client.device.object); } int diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index 7030307d2d48..bea4543554ba 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h @@ -129,7 +129,7 @@ nv_two_heads(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); const int impl = dev->pdev->device & 0x0ff0; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 && + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 && impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) return true; @@ -148,7 +148,7 @@ nv_two_reg_pll(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); const int impl = dev->pdev->device & 0x0ff0; - if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) + if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE) return true; return false; } @@ -170,7 +170,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table, struct dcb_output *outp, int crtc) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_bios *bios = nvxx_bios(&drm->device); + struct nvkm_bios *bios = nvxx_bios(&drm->client.device); struct nvbios_init init = { .subdev = &bios->subdev, .bios = bios, diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index 74856a8b8f35..b98599002831 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -89,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner) if (owner == 1) owner *= 3; - if (drm->device.info.chipset == 0x11) { + if (drm->client.device.info.chipset == 0x11) { /* This might seem stupid, but the blob does it and * omitting it often locks the system up. 
*/ @@ -100,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner) /* CR44 is always changed on CRTC0 */ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner); - if (drm->device.info.chipset == 0x11) { /* set me harder */ + if (drm->client.device.info.chipset == 0x11) { /* set me harder */ NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner); } @@ -149,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1, pllvals->NM1 = pll1 & 0xffff; if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2) pllvals->NM2 = pll2 & 0xffff; - else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) { + else if (drm->client.device.info.chipset == 0x30 || drm->client.device.info.chipset == 0x35) { pllvals->M1 &= 0xf; /* only 4 bits */ if (pll1 & NV30_RAMDAC_ENABLE_VCO2) { pllvals->M2 = (pll1 >> 4) & 0x7; @@ -165,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, struct nvkm_pll_vals *pllvals) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_object *device = &drm->device.object; - struct nvkm_bios *bios = nvxx_bios(&drm->device); + struct nvif_object *device = &drm->client.device.object; + struct nvkm_bios *bios = nvxx_bios(&drm->client.device); uint32_t reg1, pll1, pll2 = 0; struct nvbios_pll pll_lim; int ret; @@ -184,7 +184,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype, pll2 = nvif_rd32(device, reg2); } - if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) { + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) { uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580); /* check whether vpll has been forced into single stage mode */ @@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) uint32_t mpllP; pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP); + mpllP = (mpllP >> 8) & 0xf; if (!mpllP) mpllP = 4; @@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) uint32_t clock; pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock); - return clock; + return clock / 1000; } ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals); @@ -252,7 +253,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) */ struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_device *device = &drm->device; + struct nvif_device *device = &drm->client.device; struct nvkm_clk *clk = nvxx_clk(device); struct nvkm_bios *bios = nvxx_bios(device); struct nvbios_pll pll_lim; @@ -391,21 +392,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head, struct nv04_crtc_reg *regp = &state->crtc_reg[head]; int i; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC); nouveau_hw_get_pllvals(dev, head ? 
PLL_VPLL1 : PLL_VPLL0, &regp->pllvals); state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT); if (nv_two_heads(dev)) state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK); - if (drm->device.info.chipset == 0x11) + if (drm->client.device.info.chipset == 0x11) regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11); regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL); if (nv_gf4_disp_arch(dev)) regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630); - if (drm->device.info.chipset >= 0x30) + if (drm->client.device.info.chipset >= 0x30) regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634); regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP); @@ -447,7 +448,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head, if (nv_gf4_disp_arch(dev)) regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0); - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20); regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24); regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34); @@ -463,26 +464,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_clk *clk = nvxx_clk(&drm->device); + struct nvkm_clk *clk = nvxx_clk(&drm->client.device); struct nv04_crtc_reg *regp = &state->crtc_reg[head]; uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; int i; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync); clk->pll_prog(clk, pllreg, &regp->pllvals); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel); if (nv_two_heads(dev)) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk); - if (drm->device.info.chipset == 0x11) + if (drm->client.device.info.chipset == 0x11) NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither); NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl); if (nv_gf4_disp_arch(dev)) NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630); - if (drm->device.info.chipset >= 0x30) + if (drm->client.device.info.chipset >= 0x30) NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634); NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup); @@ -519,7 +520,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head, if (nv_gf4_disp_arch(dev)) NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0); - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20); NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24); NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34); @@ -600,10 +601,10 @@ nv_save_state_ext(struct drm_device *dev, int head, rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_21); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) rd_cio_state(dev, head, regp, NV_CIO_CRE_47); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) rd_cio_state(dev, head, regp, 0x9f); rd_cio_state(dev, head, regp, NV_CIO_CRE_49); @@ -612,14 +613,14 @@ nv_save_state_ext(struct
drm_device *dev, int head, rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830); regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT); - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850); if (nv_two_heads(dev)) @@ -631,7 +632,7 @@ nv_save_state_ext(struct drm_device *dev, int head, rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB); rd_cio_state(dev, head, regp, NV_CIO_CRE_4B); @@ -660,12 +661,12 @@ nv_load_state_ext(struct drm_device *dev, int head, struct nv04_mode_state *state) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; struct nv04_crtc_reg *regp = &state->crtc_reg[head]; uint32_t reg900; int i; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { if (nv_two_heads(dev)) /* setting ENGINE_CTRL (EC) *must* come before * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in @@ -677,20 +678,20 @@ nv_load_state_ext(struct drm_device *dev, int head, nvif_wr32(device, NV_PVIDEO_INTR_EN, 0); nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0); nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0); - nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1); - nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1); - nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1); - nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1); + nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->client.device.info.ram_size - 1); + nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->client.device.info.ram_size - 1); + nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->client.device.info.ram_size - 1); + nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->client.device.info.ram_size - 1); nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0); NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg); NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830); NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext); - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) { + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) { NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850); reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900); @@ -713,23 +714,23 @@ nv_load_state_ext(struct drm_device *dev, int head, wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) + 
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) wr_cio_state(dev, head, regp, NV_CIO_CRE_47); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) wr_cio_state(dev, head, regp, 0x9f); wr_cio_state(dev, head, regp, NV_CIO_CRE_49); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX); - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) nv_fix_nv40_hw_cursor(dev, head); wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX); wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB); wr_cio_state(dev, head, regp, NV_CIO_CRE_4B); @@ -737,14 +738,14 @@ nv_load_state_ext(struct drm_device *dev, int head, } /* NV11 and NV20 stop at 0x52. */ if (nv_gf4_disp_arch(dev)) { - if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) { + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN) { /* Not waiting for vertical retrace before modifying CRE_53/CRE_54 causes lockups. */ - nvif_msec(&drm->device, 650, + nvif_msec(&drm->client.device, 650, if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8)) break; ); - nvif_msec(&drm->device, 650, + nvif_msec(&drm->client.device, 650, if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8)) break; ); @@ -770,7 +771,7 @@ static void nv_save_state_palette(struct drm_device *dev, int head, struct nv04_mode_state *state) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; int head_offset = head * NV_PRMDIO_SIZE, i; nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, @@ -789,7 +790,7 @@ void nouveau_hw_load_state_palette(struct drm_device *dev, int head, struct nv04_mode_state *state) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; int head_offset = head * NV_PRMDIO_SIZE, i; nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset, @@ -809,7 +810,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head, { struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->device.info.chipset == 0x11) + if (drm->client.device.info.chipset == 0x11) /* NB: no attempt is made to restore the bad pll later on */ nouveau_hw_fix_bad_vpll(dev, head); nv_save_state_ramdac(dev, head, state); diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.h b/drivers/gpu/drm/nouveau/dispnv04/hw.h index 3bded60c5596..3a2be47fb4f1 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.h +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.h @@ -60,7 +60,7 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp, static inline uint32_t NVReadCRTC(struct drm_device *dev, int head, uint32_t reg) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; uint32_t val; if (head) reg += NV_PCRTC0_SIZE; @@ -71,7 +71,7 @@ static inline uint32_t NVReadCRTC(struct drm_device *dev, static inline void NVWriteCRTC(struct drm_device *dev, int head, uint32_t reg, uint32_t val) { - struct nvif_object 
*device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; if (head) reg += NV_PCRTC0_SIZE; nvif_wr32(device, reg, val); @@ -80,7 +80,7 @@ static inline void NVWriteCRTC(struct drm_device *dev, static inline uint32_t NVReadRAMDAC(struct drm_device *dev, int head, uint32_t reg) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; uint32_t val; if (head) reg += NV_PRAMDAC0_SIZE; @@ -91,7 +91,7 @@ static inline uint32_t NVReadRAMDAC(struct drm_device *dev, static inline void NVWriteRAMDAC(struct drm_device *dev, int head, uint32_t reg, uint32_t val) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; if (head) reg += NV_PRAMDAC0_SIZE; nvif_wr32(device, reg, val); @@ -120,7 +120,7 @@ static inline void nv_write_tmds(struct drm_device *dev, static inline void NVWriteVgaCrtc(struct drm_device *dev, int head, uint8_t index, uint8_t value) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value); } @@ -128,7 +128,7 @@ static inline void NVWriteVgaCrtc(struct drm_device *dev, static inline uint8_t NVReadVgaCrtc(struct drm_device *dev, int head, uint8_t index) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; uint8_t val; nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index); val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE); @@ -165,13 +165,13 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_ static inline uint8_t NVReadPRMVIO(struct drm_device *dev, int head, uint32_t reg) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; struct nouveau_drm *drm = nouveau_drm(dev); uint8_t val; /* Only NV4x have two pvio ranges; other twoHeads cards MUST call * NVSetOwner for the relevant head to be programmed */ - if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) reg += NV_PRMVIO_SIZE; val = nvif_rd08(device, reg); @@ -181,12 +181,12 @@ static inline uint8_t NVReadPRMVIO(struct drm_device *dev, static inline void NVWritePRMVIO(struct drm_device *dev, int head, uint32_t reg, uint8_t value) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; struct nouveau_drm *drm = nouveau_drm(dev); /* Only NV4x have two pvio ranges; other twoHeads cards MUST call * NVSetOwner for the relevant head to be programmed */ - if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) reg += NV_PRMVIO_SIZE; nvif_wr08(device, reg, value); @@ -194,14 +194,14 @@ static inline void NVWritePRMVIO(struct drm_device *dev, static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); 
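/* Editor's note, not part of the patch: the nvif_rd08() of
 * NV_PRMCIO_INP0__COLOR above is the classic VGA sequence -- reading the
 * input status register resets the attribute controller's index/data
 * flip-flop, so the nvif_wr08() to NV_PRMCIO_ARX below is interpreted as
 * an index write. Bit 5 (0x20) of that index is the palette address
 * source bit: clearing it opens the palette for host access (the
 * 'enable' case here), while setting it hands it back to the display. */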
nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20); } static inline bool NVGetEnablePalette(struct drm_device *dev, int head) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE); return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20); } @@ -209,7 +209,7 @@ static inline bool NVGetEnablePalette(struct drm_device *dev, int head) static inline void NVWriteVgaAttr(struct drm_device *dev, int head, uint8_t index, uint8_t value) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; if (NVGetEnablePalette(dev, head)) index &= ~0x20; else @@ -223,7 +223,7 @@ static inline void NVWriteVgaAttr(struct drm_device *dev, static inline uint8_t NVReadVgaAttr(struct drm_device *dev, int head, uint8_t index) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; uint8_t val; if (NVGetEnablePalette(dev, head)) index &= ~0x20; @@ -259,10 +259,10 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect) static inline bool nv_heads_tied(struct drm_device *dev) { - struct nvif_object *device = &nouveau_drm(dev)->device.object; + struct nvif_object *device = &nouveau_drm(dev)->client.device.object; struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->device.info.chipset == 0x11) + if (drm->client.device.info.chipset == 0x11) return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28)); return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4; @@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock) NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX, lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); /* NV11 has independently lockable extended crtcs, except when tied */ - if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev)) + if (drm->client.device.info.chipset == 0x11 && !nv_heads_tied(dev)) NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX, lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE); @@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; + return drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE; } static inline void @@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) NVWriteCRTC(dev, head, NV_PCRTC_START, offset); - if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) { + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT) { /* * Hilarious, the 24th bit doesn't want to stick to * PCRTC_START... 
@@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show) *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE); NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1); - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) nv_fix_nv40_hw_cursor(dev, head); } @@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp) bpp = 8; /* Alignment requirements taken from the Haiku driver */ - if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT) mask = 128 / bpp - 1; else mask = 512 / bpp - 1; diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index 6275c270df25..5319f2a7f24d 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c @@ -97,7 +97,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t src_w, uint32_t src_h) { struct nouveau_drm *drm = nouveau_drm(plane->dev); - struct nvif_object *dev = &drm->device.object; + struct nvif_object *dev = &drm->client.device.object; struct nouveau_plane *nv_plane = container_of(plane, struct nouveau_plane, base); struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); @@ -119,7 +119,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (format > 0xffff) return -ERANGE; - if (drm->device.info.chipset >= 0x30) { + if (drm->client.device.info.chipset >= 0x30) { if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1)) return -ERANGE; } else { @@ -174,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, static int nv10_disable_plane(struct drm_plane *plane) { - struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object; + struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object; struct nouveau_plane *nv_plane = container_of(plane, struct nouveau_plane, base); @@ -198,7 +198,7 @@ nv_destroy_plane(struct drm_plane *plane) static void nv10_set_params(struct nouveau_plane *plane) { - struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object; + struct nvif_object *dev = &nouveau_drm(plane->base.dev)->client.device.object; u32 luma = (plane->brightness - 512) << 16 | plane->contrast; u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) | (cos_mul(plane->hue, plane->saturation) & 0xffff); @@ -268,7 +268,7 @@ nv10_overlay_init(struct drm_device *device) if (!plane) return; - switch (drm->device.info.chipset) { + switch (drm->client.device.info.chipset) { case 0x10: case 0x11: case 0x15: @@ -347,7 +347,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { - struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object; + struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object; struct nouveau_plane *nv_plane = container_of(plane, struct nouveau_plane, base); struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); @@ -427,7 +427,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, static int nv04_disable_plane(struct drm_plane *plane) { - struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object; + struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object; struct nouveau_plane *nv_plane = container_of(plane, struct nouveau_plane, base); @@ -495,7 +495,7 @@ err: void nouveau_overlay_init(struct drm_device *device) { - struct 
nvif_device *dev = &nouveau_drm(device)->device; + struct nvif_device *dev = &nouveau_drm(device)->client.device; if (dev->info.chipset < 0x10) nv04_overlay_init(device); else if (dev->info.chipset <= 0x40) diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c index 477a8d072af4..01664357d3e1 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c @@ -54,7 +54,7 @@ static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = { int nv04_tv_identify(struct drm_device *dev, int i2c_index) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); + struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index); if (bus) { return nvkm_i2c_bus_probe(bus, "TV encoder", @@ -206,7 +206,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) struct drm_encoder *encoder; struct drm_device *dev = connector->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_i2c *i2c = nvxx_i2c(&drm->device); + struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device); struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index); int type, ret; diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index 434d1e29f279..6d99f11fee4e 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -46,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); + struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); uint32_t testval, regoffset = nv04_dac_output_offset(encoder); uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; @@ -130,7 +130,7 @@ static bool get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_device *device = nvxx_device(&drm->device); + struct nvkm_device *device = nvxx_device(&drm->client.device); if (device->quirk && device->quirk->tv_pin_mask) { *pin_mask = device->quirk->tv_pin_mask; @@ -154,8 +154,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) return connector_status_disconnected; if (reliable) { - if (drm->device.info.chipset == 0x42 || - drm->device.info.chipset == 0x43) + if (drm->client.device.info.chipset == 0x42 || + drm->client.device.info.chipset == 0x43) tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe; else @@ -362,7 +362,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); + struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); @@ -435,7 +435,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder) /* Set the DACCLK register */ dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; - if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) dacclk |= 0x1a << 16; if (tv_norm->kind == CTV_ENC_MODE) { @@ -492,7 +492,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder, tv_regs->ptv_614 = 0x13; } - if 
(drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) { tv_regs->ptv_500 = 0xe8e0; tv_regs->ptv_504 = 0x1710; tv_regs->ptv_604 = 0x0; @@ -587,7 +587,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder) nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); /* This could use refinement for flatpanels, but it should work */ - if (drm->device.info.chipset < 0x44) + if (drm->client.device.info.chipset < 0x44) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h index 1b07521cde0d..29773b325bd9 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.h @@ -130,13 +130,13 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder); static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val) { - struct nvif_device *device = &nouveau_drm(dev)->device; + struct nvif_device *device = &nouveau_drm(dev)->client.device; nvif_wr32(&device->object, reg, val); } static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg) { - struct nvif_device *device = &nouveau_drm(dev)->device; + struct nvif_device *device = &nouveau_drm(dev)->client.device; return nvif_rd32(&device->object, reg); } diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h index 05e6ef7cd190..91e33db21a2f 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl826e.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl826e.h @@ -10,5 +10,5 @@ struct g82_channel_dma_v0 { __u64 offset; }; -#define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00 +#define NV826E_V0_NTFY_NON_STALL_INTERRUPT 0x00 #endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h index cecafcb1e954..e34efd4ec537 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl826f.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl826f.h @@ -11,5 +11,5 @@ struct g82_channel_gpfifo_v0 { __u64 vm; }; -#define G82_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00 +#define NV826F_V0_NTFY_NON_STALL_INTERRUPT 0x00 #endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h index 2caf0838fcfd..a2d5410a491b 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl906f.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl906f.h @@ -10,5 +10,6 @@ struct fermi_channel_gpfifo_v0 { __u64 vm; }; -#define FERMI_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00 +#define NV906F_V0_NTFY_NON_STALL_INTERRUPT 0x00 +#define NV906F_V0_NTFY_KILLED 0x01 #endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h index 46301ec018ce..2efa3d048bb9 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h @@ -25,5 +25,6 @@ struct kepler_channel_gpfifo_a_v0 { __u64 vm; }; -#define NVA06F_V0_NTFY_UEVENT 0x00 +#define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00 +#define NVA06F_V0_NTFY_KILLED 0x01 #endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 82235f30277c..3a2c0137d4b4 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -2,23 +2,31 @@ #define __NVIF_CLASS_H__ /* these class numbers are made up by us, and not nvidia-assigned */ -#define NVIF_CLASS_CONTROL /* if0001.h */ -1 -#define NVIF_CLASS_PERFMON 
/* if0002.h */ -2 -#define NVIF_CLASS_PERFDOM /* if0003.h */ -3 -#define NVIF_CLASS_SW_NV04 /* if0004.h */ -4 -#define NVIF_CLASS_SW_NV10 /* if0005.h */ -5 -#define NVIF_CLASS_SW_NV50 /* if0005.h */ -6 -#define NVIF_CLASS_SW_GF100 /* if0005.h */ -7 +#define NVIF_CLASS_CLIENT /* if0000.h */ -0x00000000 + +#define NVIF_CLASS_CONTROL /* if0001.h */ -0x00000001 + +#define NVIF_CLASS_PERFMON /* if0002.h */ -0x00000002 +#define NVIF_CLASS_PERFDOM /* if0003.h */ -0x00000003 + +#define NVIF_CLASS_SW_NV04 /* if0004.h */ -0x00000004 +#define NVIF_CLASS_SW_NV10 /* if0005.h */ -0x00000005 +#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006 +#define NVIF_CLASS_SW_GF100 /* if0005.h */ -0x00000007 /* the below match nvidia-assigned (either in hw, or sw) class numbers */ +#define NV_NULL_CLASS 0x00000030 + #define NV_DEVICE /* cl0080.h */ 0x00000080 #define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002 #define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003 #define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d +#define NV50_TWOD 0x0000502d #define FERMI_TWOD_A 0x0000902d +#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039 #define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039 #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040 @@ -99,6 +107,12 @@ #define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e #define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e +#define NV50_TESLA 0x00005097 +#define G82_TESLA 0x00008297 +#define GT200_TESLA 0x00008397 +#define GT214_TESLA 0x00008597 +#define GT21A_TESLA 0x00008697 + #define FERMI_A /* cl9097.h */ 0x00009097 #define FERMI_B /* cl9097.h */ 0x00009197 #define FERMI_C /* cl9097.h */ 0x00009297 @@ -140,6 +154,8 @@ #define FERMI_DECOMPRESS 0x000090b8 +#define NV50_COMPUTE 0x000050c0 +#define GT214_COMPUTE 0x000085c0 #define FERMI_COMPUTE_A 0x000090c0 #define FERMI_COMPUTE_B 0x000091c0 #define KEPLER_COMPUTE_A 0x0000a0c0 diff --git a/drivers/gpu/drm/nouveau/include/nvif/client.h b/drivers/gpu/drm/nouveau/include/nvif/client.h index 4a7f6f7b836d..b52a8eadce01 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/client.h +++ b/drivers/gpu/drm/nouveau/include/nvif/client.h @@ -11,8 +11,7 @@ struct nvif_client { bool super; }; -int nvif_client_init(const char *drv, const char *name, u64 device, - const char *cfg, const char *dbg, +int nvif_client_init(struct nvif_client *parent, const char *name, u64 device, struct nvif_client *); void nvif_client_fini(struct nvif_client *); int nvif_client_ioctl(struct nvif_client *, void *, u32); diff --git a/drivers/gpu/drm/nouveau/include/nvif/driver.h b/drivers/gpu/drm/nouveau/include/nvif/driver.h index 8bd39e69229c..0c6f48d8140a 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/driver.h +++ b/drivers/gpu/drm/nouveau/include/nvif/driver.h @@ -1,5 +1,7 @@ #ifndef __NVIF_DRIVER_H__ #define __NVIF_DRIVER_H__ +#include <nvif/os.h> +struct nvif_client; struct nvif_driver { const char *name; @@ -14,9 +16,11 @@ struct nvif_driver { bool keep; }; +int nvif_driver_init(const char *drv, const char *cfg, const char *dbg, + const char *name, u64 device, struct nvif_client *); + extern const struct nvif_driver nvif_driver_nvkm; extern const struct nvif_driver nvif_driver_drm; extern const struct nvif_driver nvif_driver_lib; extern const struct nvif_driver nvif_driver_null; - #endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0000.h b/drivers/gpu/drm/nouveau/include/nvif/if0000.h index 85c44e8a1201..c2c0fc41e017 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/if0000.h +++ b/drivers/gpu/drm/nouveau/include/nvif/if0000.h @@ -1,9 +1,16 @@ 
#ifndef __NVIF_IF0000_H__ #define __NVIF_IF0000_H__ -#define NV_CLIENT_DEVLIST 0x00 +struct nvif_client_v0 { + __u8 version; + __u8 pad01[7]; + __u64 device; + char name[32]; +}; + +#define NVIF_CLIENT_V0_DEVLIST 0x00 -struct nv_client_devlist_v0 { +struct nvif_client_devlist_v0 { __u8 version; __u8 count; __u8 pad02[6]; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h index eaf5905a87a3..e876634da10a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h @@ -1,5 +1,6 @@ #ifndef __NVKM_CLIENT_H__ #define __NVKM_CLIENT_H__ +#define nvkm_client(p) container_of((p), struct nvkm_client, object) #include <core/object.h> struct nvkm_client { @@ -8,9 +9,8 @@ struct nvkm_client { u64 device; u32 debug; - struct nvkm_client_notify *notify[16]; + struct nvkm_client_notify *notify[32]; struct rb_root objroot; - struct rb_root dmaroot; bool super; void *data; @@ -19,15 +19,11 @@ struct nvkm_client { struct nvkm_vm *vm; }; -bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *); -void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *); -struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object); - int nvkm_client_new(const char *name, u64 device, const char *cfg, - const char *dbg, struct nvkm_client **); -void nvkm_client_del(struct nvkm_client **); -int nvkm_client_init(struct nvkm_client *); -int nvkm_client_fini(struct nvkm_client *, bool suspend); + const char *dbg, + int (*)(const void *, u32, const void *, u32), + struct nvkm_client **); +struct nvkm_client *nvkm_client_search(struct nvkm_client *, u64 handle); int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *, void *data, u32 size); @@ -37,8 +33,8 @@ int nvkm_client_notify_put(struct nvkm_client *, int index); /* logging for client-facing objects */ #define nvif_printk(o,l,p,f,a...) do { \ - struct nvkm_object *_object = (o); \ - struct nvkm_client *_client = _object->client; \ + const struct nvkm_object *_object = (o); \ + const struct nvkm_client *_client = _object->client; \ if (_client->debug >= NV_DBG_##l) \ printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name, \ _object->handle, _object->oclass, ##a); \ diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index 6bc712f32c8b..d426b86e2712 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -262,7 +262,7 @@ extern const struct nvkm_sclass nvkm_udevice_sclass; /* device logging */ #define nvdev_printk_(d,l,p,f,a...) 
do { \ - struct nvkm_device *_device = (d); \ + const struct nvkm_device *_device = (d); \ if (_device->debug >= (l)) \ dev_##p(_device->dev, f, ##a); \ } while(0) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h index 9ebfd8782366..d4cd2fbfde88 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/engine.h @@ -20,6 +20,7 @@ struct nvkm_engine_func { int (*fini)(struct nvkm_engine *, bool suspend); void (*intr)(struct nvkm_engine *); void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *); + bool (*chsw_load)(struct nvkm_engine *); struct { int (*sclass)(struct nvkm_oclass *, int index, @@ -44,4 +45,5 @@ int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *, struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *); void nvkm_engine_unref(struct nvkm_engine **); void nvkm_engine_tile(struct nvkm_engine *, int region); +bool nvkm_engine_chsw_load(struct nvkm_engine *); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h index 9363b839a9da..33ca6769266a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h @@ -6,9 +6,10 @@ struct nvkm_vma; struct nvkm_vm; enum nvkm_memory_target { - NVKM_MEM_TARGET_INST, - NVKM_MEM_TARGET_VRAM, - NVKM_MEM_TARGET_HOST, + NVKM_MEM_TARGET_INST, /* instance memory */ + NVKM_MEM_TARGET_VRAM, /* video memory */ + NVKM_MEM_TARGET_HOST, /* coherent system memory */ + NVKM_MEM_TARGET_NCOH, /* non-coherent system memory */ }; struct nvkm_memory { diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h index d92fd41e4056..7bd4897a8a2a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/mm.h @@ -5,7 +5,7 @@ struct nvkm_mm_node { struct list_head nl_entry; struct list_head fl_entry; - struct list_head rl_entry; + struct nvkm_mm_node *next; #define NVKM_MM_HEAP_ANY 0x00 u8 heap; @@ -38,4 +38,10 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max, u32 size_min, u32 align, struct nvkm_mm_node **); void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **); void nvkm_mm_dump(struct nvkm_mm *, const char *); + +static inline bool +nvkm_mm_contiguous(struct nvkm_mm_node *node) +{ + return !node->next; +} #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h index dcd048b91fac..96dda350ada3 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/object.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/object.h @@ -62,6 +62,11 @@ int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data); int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align, struct nvkm_gpuobj **); +bool nvkm_object_insert(struct nvkm_object *); +void nvkm_object_remove(struct nvkm_object *); +struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object, + const struct nvkm_object_func *); + struct nvkm_sclass { int minver; int maxver; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h index 57adefa8b08e..ca9ed3d68f44 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h @@ -32,7 +32,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *); /* subdev logging */ 
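Worth noting from the mm.h hunk a few hunks up: nvkm_mm_node drops the per-allocation rl_entry list in favour of an intrusive next pointer, so a multi-span allocation is now a singly linked chain and the new nvkm_mm_contiguous() reduces to a NULL test on node->next. A hypothetical walker over such a chain, assuming the usual offset/length fields a node carries:

    /* Sum the pages backing one allocation; a single-node chain
     * (node->next == NULL) is exactly the case nvkm_mm_contiguous()
     * reports as contiguous. */
    static u64
    nvkm_mm_node_size(const struct nvkm_mm_node *node)
    {
            u64 size = 0;
            for (; node; node = node->next)
                    size += node->length;
            return size;
    }
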
#define nvkm_printk_(s,l,p,f,a...) do { \ - struct nvkm_subdev *_subdev = (s); \ + const struct nvkm_subdev *_subdev = (s); \ if (_subdev->debug >= (l)) { \ dev_##p(_subdev->device->dev, "%s: "f, \ nvkm_subdev_name[_subdev->index], ##a); \ diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h index 114bfb737a81..d2a6532ce3b9 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/dma.h @@ -12,9 +12,6 @@ struct nvkm_dmaobj { u32 access; u64 start; u64 limit; - - struct rb_node rb; - u64 handle; /*XXX HANDLE MERGE */ }; struct nvkm_dma { @@ -22,8 +19,7 @@ struct nvkm_dma { struct nvkm_engine engine; }; -struct nvkm_dmaobj * -nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object); +struct nvkm_dmaobj *nvkm_dmaobj_search(struct nvkm_client *, u64 object); int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **); int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h index e6baf039c269..7e498e65b1e8 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h @@ -4,13 +4,26 @@ #include <core/engine.h> struct nvkm_fifo_chan; +enum nvkm_falcon_dmaidx { + FALCON_DMAIDX_UCODE = 0, + FALCON_DMAIDX_VIRT = 1, + FALCON_DMAIDX_PHYS_VID = 2, + FALCON_DMAIDX_PHYS_SYS_COH = 3, + FALCON_DMAIDX_PHYS_SYS_NCOH = 4, +}; + struct nvkm_falcon { const struct nvkm_falcon_func *func; - struct nvkm_engine engine; - + const struct nvkm_subdev *owner; + const char *name; u32 addr; - u8 version; - u8 secret; + + struct mutex mutex; + const struct nvkm_subdev *user; + + u8 version; + u8 secret; + bool debug; struct nvkm_memory *core; bool external; @@ -19,15 +32,25 @@ struct nvkm_falcon { u32 limit; u32 *data; u32 size; + u8 ports; } code; struct { u32 limit; u32 *data; u32 size; + u8 ports; } data; + + struct nvkm_engine engine; }; +int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr, + struct nvkm_falcon **); +void nvkm_falcon_del(struct nvkm_falcon **); +int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *); +void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *); + int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *, int index, bool enable, u32 addr, struct nvkm_engine **); @@ -42,6 +65,51 @@ struct nvkm_falcon_func { } data; void (*init)(struct nvkm_falcon *); void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *); + void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool); + void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8); + void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *); + void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *); + int (*wait_for_halt)(struct nvkm_falcon *, u32); + int (*clear_interrupt)(struct nvkm_falcon *, u32); + void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr); + void (*start)(struct nvkm_falcon *); + int (*enable)(struct nvkm_falcon *falcon); + void (*disable)(struct nvkm_falcon *falcon); + struct nvkm_sclass sclass[]; }; + +static inline u32 +nvkm_falcon_rd32(struct nvkm_falcon *falcon, u32 addr) +{ + return nvkm_rd32(falcon->owner->device, falcon->addr + addr); +} + +static inline void +nvkm_falcon_wr32(struct nvkm_falcon *falcon, u32 addr, u32 data) +{ + nvkm_wr32(falcon->owner->device, falcon->addr + addr, data); 
+} + +static inline u32 +nvkm_falcon_mask(struct nvkm_falcon *falcon, u32 addr, u32 mask, u32 val) +{ + struct nvkm_device *device = falcon->owner->device; + + return nvkm_mask(device, falcon->addr + addr, mask, val); +} + +void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8, + bool); +void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8); +void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *); +void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *); +void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32); +void nvkm_falcon_start(struct nvkm_falcon *); +int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32); +int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32); +int nvkm_falcon_enable(struct nvkm_falcon *); +void nvkm_falcon_disable(struct nvkm_falcon *); +int nvkm_falcon_reset(struct nvkm_falcon *); + #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index ed92fec5292c..24efa900d8ca 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h @@ -40,6 +40,7 @@ struct nvkm_fifo { struct nvkm_event uevent; /* async user trigger */ struct nvkm_event cevent; /* channel creation event */ + struct nvkm_event kevent; /* channel killed */ }; void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h new file mode 100644 index 000000000000..f5f4a14c4030 --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/power_budget.h @@ -0,0 +1,26 @@ +#ifndef __NVBIOS_POWER_BUDGET_H__ +#define __NVBIOS_POWER_BUDGET_H__ + +#include <nvkm/subdev/bios.h> + +struct nvbios_power_budget_entry { + u32 min_w; + u32 avg_w; + u32 max_w; +}; + +struct nvbios_power_budget { + u32 offset; + u8 ver; + u8 hlen; + u8 elen; + u8 ecount; + u8 cap_entry; +}; + +int nvbios_power_budget_header(struct nvkm_bios *, + struct nvbios_power_budget *); +int nvbios_power_budget_entry(struct nvkm_bios *, struct nvbios_power_budget *, + u8 idx, struct nvbios_power_budget_entry *); + +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 794e432578b2..0b26a4c860ec 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -29,7 +29,7 @@ struct nvkm_mem { u8 page_shift; struct nvkm_mm_node *tag; - struct list_head regions; + struct nvkm_mm_node *mem; dma_addr_t *pages; u32 memtype; u64 offset; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h index 3c2ddd975273..b7a9b041e130 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/iccsense.h @@ -8,6 +8,9 @@ struct nvkm_iccsense { bool data_valid; struct list_head sensors; struct list_head rails; + + u32 power_w_max; + u32 power_w_crit; }; int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h index 27d25b18d85c..e68ba636741b 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h @@ -9,6 +9,7 @@ struct nvkm_mc { void nvkm_mc_enable(struct nvkm_device *, enum 
nvkm_devidx); void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx); +bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_devidx); void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx); void nvkm_mc_intr(struct nvkm_device *, bool *handled); void nvkm_mc_intr_unarm(struct nvkm_device *); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h index e6523e2cea9f..ac2a695963c1 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h @@ -43,6 +43,7 @@ int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **); +int g92_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h index f37538eb1fe5..179b6ed3f595 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h @@ -1,10 +1,12 @@ #ifndef __NVKM_PMU_H__ #define __NVKM_PMU_H__ #include <core/subdev.h> +#include <engine/falcon.h> struct nvkm_pmu { const struct nvkm_pmu_func *func; struct nvkm_subdev subdev; + struct nvkm_falcon *falcon; struct { u32 base; @@ -35,6 +37,7 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); +int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h index b04c38c07761..5dbd8aa4f8c2 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h @@ -26,7 +26,7 @@ #include <core/subdev.h> enum nvkm_secboot_falcon { - NVKM_SECBOOT_FALCON_PMU = 0, + NVKM_SECBOOT_FALCON_PMU = 0, NVKM_SECBOOT_FALCON_RESERVED = 1, NVKM_SECBOOT_FALCON_FECS = 2, NVKM_SECBOOT_FALCON_GPCCS = 3, @@ -35,22 +35,23 @@ enum nvkm_secboot_falcon { }; /** - * @base: base IO address of the falcon performing secure boot - * @irq_mask: IRQ mask of the falcon performing secure boot - * @enable_mask: enable mask of the falcon performing secure boot + * @wpr_set: whether the WPR region is currently set */ struct nvkm_secboot { const struct nvkm_secboot_func *func; + struct nvkm_acr *acr; struct nvkm_subdev subdev; + struct nvkm_falcon *boot_falcon; - enum nvkm_devidx devidx; - u32 base; + u64 wpr_addr; + u32 wpr_size; + + bool wpr_set; }; #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev) bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon); -int nvkm_secboot_reset(struct nvkm_secboot *, u32 falcon); -int nvkm_secboot_start(struct nvkm_secboot *, u32 falcon); +int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon); int gm200_secboot_new(struct nvkm_device *, int, struct 
nvkm_secboot **); int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h index 82d3e28918fd..6a567fe347b3 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h @@ -48,10 +48,8 @@ void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *); } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \ \ if (_taken >= _nsecs) { \ - if (_warn) { \ - dev_warn(_device->dev, "timeout at %s:%d/%s()!\n", \ - __FILE__, __LINE__, __func__); \ - } \ + if (_warn) \ + dev_WARN(_device->dev, "timeout\n"); \ _taken = -ETIMEDOUT; \ } \ _taken; \ diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h index 71ebbfd4484f..d23209b62c25 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h @@ -11,6 +11,7 @@ struct nvkm_top { u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx); u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs); u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx); +int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_devidx); enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault); enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn); diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 4df4f6ed4886..f98f800cc011 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -87,7 +87,7 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret) s32 nouveau_abi16_swclass(struct nouveau_drm *drm) { - switch (drm->device.info.family) { + switch (drm->client.device.info.family) { case NV_DEVICE_INFO_V0_TNT: return NVIF_CLASS_SW_NV04; case NV_DEVICE_INFO_V0_CELSIUS: @@ -175,7 +175,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) { struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_device *device = &drm->device; + struct nvif_device *device = &drm->client.device; struct nvkm_gr *gr = nvxx_gr(device); struct drm_nouveau_getparam *getparam = data; @@ -321,7 +321,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS) } /* Named memory object area */ - ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, + ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, 0, 0, &chan->ntfy); if (ret == 0) ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false); diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 8b1ca4add2ed..380f340204e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -65,7 +65,7 @@ static int nv40_get_intensity(struct backlight_device *bd) { struct nouveau_drm *drm = bl_get_data(bd); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK) >> 16; @@ -76,7 +76,7 @@ static int nv40_set_intensity(struct backlight_device *bd) { struct nouveau_drm *drm = bl_get_data(bd); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; int val = bd->props.brightness; int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT); @@ -96,7 +96,7 
@@ static int nv40_backlight_init(struct drm_connector *connector) { struct nouveau_drm *drm = nouveau_drm(connector->dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; struct backlight_properties props; struct backlight_device *bd; struct backlight_connector bl_connector; @@ -133,7 +133,7 @@ nv50_get_intensity(struct backlight_device *bd) { struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; int or = nv_encoder->or; u32 div = 1025; u32 val; @@ -148,7 +148,7 @@ nv50_set_intensity(struct backlight_device *bd) { struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; int or = nv_encoder->or; u32 div = 1025; u32 val = (bd->props.brightness * div) / 100; @@ -169,7 +169,7 @@ nva3_get_intensity(struct backlight_device *bd) { struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; int or = nv_encoder->or; u32 div, val; @@ -187,7 +187,7 @@ nva3_set_intensity(struct backlight_device *bd) { struct nouveau_encoder *nv_encoder = bl_get_data(bd); struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; int or = nv_encoder->or; u32 div, val; @@ -213,7 +213,7 @@ static int nv50_backlight_init(struct drm_connector *connector) { struct nouveau_drm *drm = nouveau_drm(connector->dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; struct nouveau_encoder *nv_encoder; struct backlight_properties props; struct backlight_device *bd; @@ -231,9 +231,9 @@ nv50_backlight_init(struct drm_connector *connector) if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) return 0; - if (drm->device.info.chipset <= 0xa0 || - drm->device.info.chipset == 0xaa || - drm->device.info.chipset == 0xac) + if (drm->client.device.info.chipset <= 0xa0 || + drm->client.device.info.chipset == 0xaa || + drm->client.device.info.chipset == 0xac) ops = &nv50_bl_ops; else ops = &nva3_bl_ops; @@ -265,7 +265,7 @@ int nouveau_backlight_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_device *device = &drm->device; + struct nvif_device *device = &drm->client.device; struct drm_connector *connector; if (apple_gmux_present()) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 23ffe8571a99..9a0772ad495a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -215,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head */ struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; struct nvbios *bios = &drm->vbios; uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; uint32_t sel_clk_binding, sel_clk; @@ -319,7 +319,7 @@ static int get_fp_strap(struct drm_device *dev, struct nvbios *bios) { struct 
nouveau_drm *drm = nouveau_drm(dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; /* * The fp strap is normally dictated by the "User Strap" in @@ -333,10 +333,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios) if (bios->major_version < 5 && bios->data[0x48] & 0x4) return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_MAXWELL) return nvif_rd32(device, 0x001800) & 0x0000000f; else - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf; else return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf; @@ -638,7 +638,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, */ struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; struct nvbios *bios = &drm->vbios; int cv = bios->chip_version; uint16_t clktable = 0, scriptptr; @@ -1255,7 +1255,7 @@ olddcb_table(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); u8 *dcb = NULL; - if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT) + if (drm->client.device.info.family > NV_DEVICE_INFO_V0_TNT) dcb = ROMPTR(dev, drm->vbios.data[0x36]); if (!dcb) { NV_WARN(drm, "No DCB data found in VBIOS\n"); @@ -1918,7 +1918,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio */ struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_object *device = &drm->device.object; + struct nvif_object *device = &drm->client.device.object; uint8_t bytes_to_write; uint16_t hwsq_entry_offset; int i; @@ -2012,7 +2012,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) static bool NVInitVBIOS(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_bios *bios = nvxx_bios(&drm->device); + struct nvkm_bios *bios = nvxx_bios(&drm->client.device); struct nvbios *legacy = &drm->vbios; memset(legacy, 0, sizeof(struct nvbios)); @@ -2064,7 +2064,7 @@ nouveau_bios_posted(struct drm_device *dev) struct nouveau_drm *drm = nouveau_drm(dev); unsigned htotal; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) return true; htotal = NVReadVgaCrtc(dev, 0, 0x06); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 8a528ebe30f3..548f36d33924 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -48,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg, { struct nouveau_drm *drm = nouveau_drm(dev); int i = reg - drm->tile.reg; - struct nvkm_device *device = nvxx_device(&drm->device); + struct nvkm_device *device = nvxx_device(&drm->client.device); struct nvkm_fb *fb = device->fb; struct nvkm_fb_tile *tile = &fb->tile.region[i]; @@ -100,7 +100,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr, u32 size, u32 pitch, u32 flags) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_fb *fb = nvxx_fb(&drm->device); + struct nvkm_fb *fb = nvxx_fb(&drm->client.device); struct nouveau_drm_tile *tile, *found = NULL; int i; @@ -139,60 +139,62 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) kfree(nvbo); } +static inline u64 +roundup_64(u64 x, u32 y) +{ + x += y - 1; + do_div(x, 
y); + return x * y; +} + static void nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, - int *align, int *size) + int *align, u64 *size) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - struct nvif_device *device = &drm->device; + struct nvif_device *device = &drm->client.device; if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { if (nvbo->tile_mode) { if (device->info.chipset >= 0x40) { *align = 65536; - *size = roundup(*size, 64 * nvbo->tile_mode); + *size = roundup_64(*size, 64 * nvbo->tile_mode); } else if (device->info.chipset >= 0x30) { *align = 32768; - *size = roundup(*size, 64 * nvbo->tile_mode); + *size = roundup_64(*size, 64 * nvbo->tile_mode); } else if (device->info.chipset >= 0x20) { *align = 16384; - *size = roundup(*size, 64 * nvbo->tile_mode); + *size = roundup_64(*size, 64 * nvbo->tile_mode); } else if (device->info.chipset >= 0x10) { *align = 16384; - *size = roundup(*size, 32 * nvbo->tile_mode); + *size = roundup_64(*size, 32 * nvbo->tile_mode); } } } else { - *size = roundup(*size, (1 << nvbo->page_shift)); + *size = roundup_64(*size, (1 << nvbo->page_shift)); *align = max((1 << nvbo->page_shift), *align); } - *size = roundup(*size, PAGE_SIZE); + *size = roundup_64(*size, PAGE_SIZE); } int -nouveau_bo_new(struct drm_device *dev, int size, int align, +nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, struct sg_table *sg, struct reservation_object *robj, struct nouveau_bo **pnvbo) { - struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_drm *drm = nouveau_drm(cli->dev); struct nouveau_bo *nvbo; size_t acc_size; int ret; int type = ttm_bo_type_device; - int lpg_shift = 12; - int max_size; - - if (drm->client.vm) - lpg_shift = drm->client.vm->mmu->lpg_shift; - max_size = INT_MAX & ~((1 << lpg_shift) - 1); - if (size <= 0 || size > max_size) { - NV_WARN(drm, "skipped size %x\n", (u32)size); + if (!size) { + NV_WARN(drm, "skipped size %016llx\n", size); return -EINVAL; } @@ -208,8 +210,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, nvbo->tile_mode = tile_mode; nvbo->tile_flags = tile_flags; nvbo->bo.bdev = &drm->ttm.bdev; + nvbo->cli = cli; - if (!nvxx_device(&drm->device)->func->cpu_coherent) + if (!nvxx_device(&drm->client.device)->func->cpu_coherent) nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; nvbo->page_shift = 12; @@ -255,10 +258,10 @@ static void set_placement_range(struct nouveau_bo *nvbo, uint32_t type) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT; + u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT; unsigned i, fpfn, lpfn; - if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && nvbo->bo.mem.num_pages < vram_pages / 4) { /* @@ -316,12 +319,12 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) if (ret) return ret; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA && + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && memtype == TTM_PL_FLAG_VRAM && contig) { if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) { if (bo->mem.mem_type == TTM_PL_VRAM) { struct nvkm_mem *mem = bo->mem.mm_node; - if (!list_is_singular(&mem->regions)) + if (!nvkm_mm_contiguous(mem->mem)) evict = true; } nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG; @@ -443,7 +446,7 @@ void nouveau_bo_sync_for_device(struct nouveau_bo 
*nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - struct nvkm_device *device = nvxx_device(&drm->device); + struct nvkm_device *device = nvxx_device(&drm->client.device); struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; int i; @@ -463,7 +466,7 @@ void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - struct nvkm_device *device = nvxx_device(&drm->device); + struct nvkm_device *device = nvxx_device(&drm->client.device); struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; int i; @@ -579,9 +582,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { /* Some BARs do not support being ioremapped WC */ - if (nvxx_bar(&drm->device)->iomap_uncached) { + if (nvxx_bar(&drm->client.device)->iomap_uncached) { man->available_caching = TTM_PL_FLAG_UNCACHED; man->default_caching = TTM_PL_FLAG_UNCACHED; } @@ -594,7 +597,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, } break; case TTM_PL_TT: - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) man->func = &nouveau_gart_manager; else if (!drm->agp.bridge) @@ -654,20 +657,20 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) static int nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) + struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) { - struct nvkm_mem *node = old_mem->mm_node; + struct nvkm_mem *mem = old_reg->mm_node; int ret = RING_SPACE(chan, 10); if (ret == 0) { BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); - OUT_RING (chan, upper_32_bits(node->vma[0].offset)); - OUT_RING (chan, lower_32_bits(node->vma[0].offset)); - OUT_RING (chan, upper_32_bits(node->vma[1].offset)); - OUT_RING (chan, lower_32_bits(node->vma[1].offset)); + OUT_RING (chan, upper_32_bits(mem->vma[0].offset)); + OUT_RING (chan, lower_32_bits(mem->vma[0].offset)); + OUT_RING (chan, upper_32_bits(mem->vma[1].offset)); + OUT_RING (chan, lower_32_bits(mem->vma[1].offset)); OUT_RING (chan, PAGE_SIZE); OUT_RING (chan, PAGE_SIZE); OUT_RING (chan, PAGE_SIZE); - OUT_RING (chan, new_mem->num_pages); + OUT_RING (chan, new_reg->num_pages); BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); } return ret; @@ -686,15 +689,15 @@ nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) static int nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) + struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) { - struct nvkm_mem *node = old_mem->mm_node; - u64 src_offset = node->vma[0].offset; - u64 dst_offset = node->vma[1].offset; - u32 page_count = new_mem->num_pages; + struct nvkm_mem *mem = old_reg->mm_node; + u64 src_offset = mem->vma[0].offset; + u64 dst_offset = mem->vma[1].offset; + u32 page_count = new_reg->num_pages; int ret; - page_count = new_mem->num_pages; + page_count = new_reg->num_pages; while (page_count) { int line_count = (page_count > 8191) ? 
8191 : page_count; @@ -724,15 +727,15 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) + struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) { - struct nvkm_mem *node = old_mem->mm_node; - u64 src_offset = node->vma[0].offset; - u64 dst_offset = node->vma[1].offset; - u32 page_count = new_mem->num_pages; + struct nvkm_mem *mem = old_reg->mm_node; + u64 src_offset = mem->vma[0].offset; + u64 dst_offset = mem->vma[1].offset; + u32 page_count = new_reg->num_pages; int ret; - page_count = new_mem->num_pages; + page_count = new_reg->num_pages; while (page_count) { int line_count = (page_count > 2047) ? 2047 : page_count; @@ -763,15 +766,15 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) + struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) { - struct nvkm_mem *node = old_mem->mm_node; - u64 src_offset = node->vma[0].offset; - u64 dst_offset = node->vma[1].offset; - u32 page_count = new_mem->num_pages; + struct nvkm_mem *mem = old_reg->mm_node; + u64 src_offset = mem->vma[0].offset; + u64 dst_offset = mem->vma[1].offset; + u32 page_count = new_reg->num_pages; int ret; - page_count = new_mem->num_pages; + page_count = new_reg->num_pages; while (page_count) { int line_count = (page_count > 8191) ? 8191 : page_count; @@ -801,35 +804,35 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) + struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) { - struct nvkm_mem *node = old_mem->mm_node; + struct nvkm_mem *mem = old_reg->mm_node; int ret = RING_SPACE(chan, 7); if (ret == 0) { BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); - OUT_RING (chan, upper_32_bits(node->vma[0].offset)); - OUT_RING (chan, lower_32_bits(node->vma[0].offset)); - OUT_RING (chan, upper_32_bits(node->vma[1].offset)); - OUT_RING (chan, lower_32_bits(node->vma[1].offset)); + OUT_RING (chan, upper_32_bits(mem->vma[0].offset)); + OUT_RING (chan, lower_32_bits(mem->vma[0].offset)); + OUT_RING (chan, upper_32_bits(mem->vma[1].offset)); + OUT_RING (chan, lower_32_bits(mem->vma[1].offset)); OUT_RING (chan, 0x00000000 /* COPY */); - OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); + OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); } return ret; } static int nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) + struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) { - struct nvkm_mem *node = old_mem->mm_node; + struct nvkm_mem *mem = old_reg->mm_node; int ret = RING_SPACE(chan, 7); if (ret == 0) { BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); - OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); - OUT_RING (chan, upper_32_bits(node->vma[0].offset)); - OUT_RING (chan, lower_32_bits(node->vma[0].offset)); - OUT_RING (chan, upper_32_bits(node->vma[1].offset)); - OUT_RING (chan, lower_32_bits(node->vma[1].offset)); + OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); + OUT_RING (chan, upper_32_bits(mem->vma[0].offset)); + OUT_RING (chan, lower_32_bits(mem->vma[0].offset)); + OUT_RING (chan, 
upper_32_bits(mem->vma[1].offset)); + OUT_RING (chan, lower_32_bits(mem->vma[1].offset)); OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); } return ret; @@ -853,14 +856,14 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) static int nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) + struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) { - struct nvkm_mem *node = old_mem->mm_node; - u64 length = (new_mem->num_pages << PAGE_SHIFT); - u64 src_offset = node->vma[0].offset; - u64 dst_offset = node->vma[1].offset; - int src_tiled = !!node->memtype; - int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype; + struct nvkm_mem *mem = old_reg->mm_node; + u64 length = (new_reg->num_pages << PAGE_SHIFT); + u64 src_offset = mem->vma[0].offset; + u64 dst_offset = mem->vma[1].offset; + int src_tiled = !!mem->memtype; + int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype; int ret; while (length) { @@ -940,20 +943,20 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) static inline uint32_t nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, - struct nouveau_channel *chan, struct ttm_mem_reg *mem) + struct nouveau_channel *chan, struct ttm_mem_reg *reg) { - if (mem->mem_type == TTM_PL_TT) + if (reg->mem_type == TTM_PL_TT) return NvDmaTT; return chan->vram.handle; } static int nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, - struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) + struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) { - u32 src_offset = old_mem->start << PAGE_SHIFT; - u32 dst_offset = new_mem->start << PAGE_SHIFT; - u32 page_count = new_mem->num_pages; + u32 src_offset = old_reg->start << PAGE_SHIFT; + u32 dst_offset = new_reg->start << PAGE_SHIFT; + u32 page_count = new_reg->num_pages; int ret; ret = RING_SPACE(chan, 3); @@ -961,10 +964,10 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, return ret; BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); - OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); - OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg)); + OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg)); - page_count = new_mem->num_pages; + page_count = new_reg->num_pages; while (page_count) { int line_count = (page_count > 2047) ? 
2047 : page_count; @@ -995,33 +998,33 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, static int nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, - struct ttm_mem_reg *mem) + struct ttm_mem_reg *reg) { - struct nvkm_mem *old_node = bo->mem.mm_node; - struct nvkm_mem *new_node = mem->mm_node; - u64 size = (u64)mem->num_pages << PAGE_SHIFT; + struct nvkm_mem *old_mem = bo->mem.mm_node; + struct nvkm_mem *new_mem = reg->mm_node; + u64 size = (u64)reg->num_pages << PAGE_SHIFT; int ret; - ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift, - NV_MEM_ACCESS_RW, &old_node->vma[0]); + ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift, + NV_MEM_ACCESS_RW, &old_mem->vma[0]); if (ret) return ret; - ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift, - NV_MEM_ACCESS_RW, &old_node->vma[1]); + ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift, + NV_MEM_ACCESS_RW, &old_mem->vma[1]); if (ret) { - nvkm_vm_put(&old_node->vma[0]); + nvkm_vm_put(&old_mem->vma[0]); return ret; } - nvkm_vm_map(&old_node->vma[0], old_node); - nvkm_vm_map(&old_node->vma[1], new_node); + nvkm_vm_map(&old_mem->vma[0], old_mem); + nvkm_vm_map(&old_mem->vma[1], new_mem); return 0; } static int nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool no_wait_gpu, struct ttm_mem_reg *new_reg) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_channel *chan = drm->ttm.chan; @@ -1033,8 +1036,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, * old nvkm_mem node, these will get cleaned up after ttm has * destroyed the ttm_mem_reg */ - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - ret = nouveau_bo_move_prep(drm, bo, new_mem); + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_bo_move_prep(drm, bo, new_reg); if (ret) return ret; } @@ -1042,14 +1045,14 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr); if (ret == 0) { - ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); + ret = drm->ttm.move(chan, bo, &bo->mem, new_reg); if (ret == 0) { ret = nouveau_fence_new(chan, false, &fence); if (ret == 0) { ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, - new_mem); + new_reg); nouveau_fence_unref(&fence); } } @@ -1124,7 +1127,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) static int nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool no_wait_gpu, struct ttm_mem_reg *new_reg) { struct ttm_place placement_memtype = { .fpfn = 0, @@ -1132,35 +1135,35 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING }; struct ttm_placement placement; - struct ttm_mem_reg tmp_mem; + struct ttm_mem_reg tmp_reg; int ret; placement.num_placement = placement.num_busy_placement = 1; placement.placement = placement.busy_placement = &placement_memtype; - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); + tmp_reg = *new_reg; + tmp_reg.mm_node = NULL; + ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu); if (ret) return ret; - ret = ttm_tt_bind(bo->ttm, &tmp_mem); + ret = ttm_tt_bind(bo->ttm, &tmp_reg); if (ret) goto out; - ret = 
nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem); + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg); if (ret) goto out; - ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem); + ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg); out: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_bo_mem_put(bo, &tmp_reg); return ret; } static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool no_wait_gpu, struct ttm_mem_reg *new_reg) { struct ttm_place placement_memtype = { .fpfn = 0, @@ -1168,34 +1171,34 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING }; struct ttm_placement placement; - struct ttm_mem_reg tmp_mem; + struct ttm_mem_reg tmp_reg; int ret; placement.num_placement = placement.num_busy_placement = 1; placement.placement = placement.busy_placement = &placement_memtype; - tmp_mem = *new_mem; - tmp_mem.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); + tmp_reg = *new_reg; + tmp_reg.mm_node = NULL; + ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu); if (ret) return ret; - ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem); + ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg); if (ret) goto out; - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem); + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg); if (ret) goto out; out: - ttm_bo_mem_put(bo, &tmp_mem); + ttm_bo_mem_put(bo, &tmp_reg); return ret; } static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, - struct ttm_mem_reg *new_mem) + struct ttm_mem_reg *new_reg) { struct nouveau_bo *nvbo = nouveau_bo(bo); struct nvkm_vma *vma; @@ -1205,10 +1208,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, return; list_for_each_entry(vma, &nvbo->vma_list, head) { - if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM && - (new_mem->mem_type == TTM_PL_VRAM || + if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM && + (new_reg->mem_type == TTM_PL_VRAM || nvbo->page_shift != vma->vm->mmu->lpg_shift)) { - nvkm_vm_map(vma, new_mem->mm_node); + nvkm_vm_map(vma, new_reg->mm_node); } else { WARN_ON(ttm_bo_wait(bo, false, false)); nvkm_vm_unmap(vma); @@ -1217,20 +1220,20 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, } static int -nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, +nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg, struct nouveau_drm_tile **new_tile) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct drm_device *dev = drm->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); - u64 offset = new_mem->start << PAGE_SHIFT; + u64 offset = new_reg->start << PAGE_SHIFT; *new_tile = NULL; - if (new_mem->mem_type != TTM_PL_VRAM) + if (new_reg->mem_type != TTM_PL_VRAM) return 0; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { - *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { + *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size, nvbo->tile_mode, nvbo->tile_flags); } @@ -1253,11 +1256,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, static int nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) + bool no_wait_gpu, struct ttm_mem_reg *new_reg) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct 
nouveau_bo *nvbo = nouveau_bo(bo); - struct ttm_mem_reg *old_mem = &bo->mem; + struct ttm_mem_reg *old_reg = &bo->mem; struct nouveau_drm_tile *new_tile = NULL; int ret = 0; @@ -1268,31 +1271,31 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, if (nvbo->pin_refcnt) NV_WARN(drm, "Moving pinned object %p!\n", nvbo); - if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { - ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { + ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile); if (ret) return ret; } /* Fake bo copy. */ - if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { + if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) { BUG_ON(bo->mem.mm_node != NULL); - bo->mem = *new_mem; - new_mem->mm_node = NULL; + bo->mem = *new_reg; + new_reg->mm_node = NULL; goto out; } /* Hardware assisted copy. */ if (drm->ttm.move) { - if (new_mem->mem_type == TTM_PL_SYSTEM) + if (new_reg->mem_type == TTM_PL_SYSTEM) ret = nouveau_bo_move_flipd(bo, evict, intr, - no_wait_gpu, new_mem); - else if (old_mem->mem_type == TTM_PL_SYSTEM) + no_wait_gpu, new_reg); + else if (old_reg->mem_type == TTM_PL_SYSTEM) ret = nouveau_bo_move_flips(bo, evict, intr, - no_wait_gpu, new_mem); + no_wait_gpu, new_reg); else ret = nouveau_bo_move_m2mf(bo, evict, intr, - no_wait_gpu, new_mem); + no_wait_gpu, new_reg); if (!ret) goto out; } @@ -1300,10 +1303,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, /* Fallback to software copy. */ ret = ttm_bo_wait(bo, intr, no_wait_gpu); if (ret == 0) - ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem); + ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg); out: - if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { if (ret) nouveau_bo_vm_cleanup(bo, NULL, &new_tile); else @@ -1323,54 +1326,54 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) } static int -nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) { - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type]; struct nouveau_drm *drm = nouveau_bdev(bdev); - struct nvkm_device *device = nvxx_device(&drm->device); - struct nvkm_mem *node = mem->mm_node; + struct nvkm_device *device = nvxx_device(&drm->client.device); + struct nvkm_mem *mem = reg->mm_node; int ret; - mem->bus.addr = NULL; - mem->bus.offset = 0; - mem->bus.size = mem->num_pages << PAGE_SHIFT; - mem->bus.base = 0; - mem->bus.is_iomem = false; + reg->bus.addr = NULL; + reg->bus.offset = 0; + reg->bus.size = reg->num_pages << PAGE_SHIFT; + reg->bus.base = 0; + reg->bus.is_iomem = false; if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) return -EINVAL; - switch (mem->mem_type) { + switch (reg->mem_type) { case TTM_PL_SYSTEM: /* System memory */ return 0; case TTM_PL_TT: #if IS_ENABLED(CONFIG_AGP) if (drm->agp.bridge) { - mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = drm->agp.base; - mem->bus.is_iomem = !drm->agp.cma; + reg->bus.offset = reg->start << PAGE_SHIFT; + reg->bus.base = drm->agp.base; + reg->bus.is_iomem = !drm->agp.cma; } #endif - if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype) + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype) /* untiled */ break; /* fallthrough, tiled memory */ case TTM_PL_VRAM: - 
mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = device->func->resource_addr(device, 1); - mem->bus.is_iomem = true; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - struct nvkm_bar *bar = nvxx_bar(&drm->device); + reg->bus.offset = reg->start << PAGE_SHIFT; + reg->bus.base = device->func->resource_addr(device, 1); + reg->bus.is_iomem = true; + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + struct nvkm_bar *bar = nvxx_bar(&drm->client.device); int page_shift = 12; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI) - page_shift = node->page_shift; + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI) + page_shift = mem->page_shift; - ret = nvkm_bar_umap(bar, node->size << 12, page_shift, - &node->bar_vma); + ret = nvkm_bar_umap(bar, mem->size << 12, page_shift, + &mem->bar_vma); if (ret) return ret; - nvkm_vm_map(&node->bar_vma, node); - mem->bus.offset = node->bar_vma.offset; + nvkm_vm_map(&mem->bar_vma, mem); + reg->bus.offset = mem->bar_vma.offset; } break; default: @@ -1380,15 +1383,15 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) } static void -nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) +nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) { - struct nvkm_mem *node = mem->mm_node; + struct nvkm_mem *mem = reg->mm_node; - if (!node->bar_vma.node) + if (!mem->bar_vma.node) return; - nvkm_vm_unmap(&node->bar_vma); - nvkm_vm_put(&node->bar_vma); + nvkm_vm_unmap(&mem->bar_vma); + nvkm_vm_put(&mem->bar_vma); } static int @@ -1396,7 +1399,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) { struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nvkm_device *device = nvxx_device(&drm->device); + struct nvkm_device *device = nvxx_device(&drm->client.device); u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; int i, ret; @@ -1404,7 +1407,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) * nothing to do here. 
*/ if (bo->mem.mem_type != TTM_PL_VRAM) { - if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !nouveau_bo_tile_layout(nvbo)) return 0; @@ -1419,7 +1422,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) } /* make sure bo is in mappable vram */ - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA || + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || bo->mem.start + bo->mem.num_pages < mappable) return 0; @@ -1461,7 +1464,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) } drm = nouveau_bdev(ttm->bdev); - device = nvxx_device(&drm->device); + device = nvxx_device(&drm->client.device); dev = drm->dev; pdev = device->dev; @@ -1518,7 +1521,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) return; drm = nouveau_bdev(ttm->bdev); - device = nvxx_device(&drm->device); + device = nvxx_device(&drm->client.device); dev = drm->dev; pdev = device->dev; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h index e42360983229..b06a5385d6dd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h @@ -26,6 +26,8 @@ struct nouveau_bo { struct list_head vma_list; unsigned page_shift; + struct nouveau_cli *cli; + u32 tile_mode; u32 tile_flags; struct nouveau_drm_tile *tile; @@ -69,7 +71,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) extern struct ttm_bo_driver nouveau_bo_driver; void nouveau_bo_move_init(struct nouveau_drm *); -int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags, +int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags, u32 tile_mode, u32 tile_flags, struct sg_table *sg, struct reservation_object *robj, struct nouveau_bo **); diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index f9b3c811187e..dbc41fa86ee8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -45,10 +45,20 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM"); int nouveau_vram_pushbuf; module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); +static int +nouveau_channel_killed(struct nvif_notify *ntfy) +{ + struct nouveau_channel *chan = container_of(ntfy, typeof(*chan), kill); + struct nouveau_cli *cli = (void *)chan->user.client; + NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid); + atomic_set(&chan->killed, 1); + return NVIF_NOTIFY_DROP; +} + int nouveau_channel_idle(struct nouveau_channel *chan) { - if (likely(chan && chan->fence)) { + if (likely(chan && chan->fence && !atomic_read(&chan->killed))) { struct nouveau_cli *cli = (void *)chan->user.client; struct nouveau_fence *fence = NULL; int ret; @@ -78,6 +88,7 @@ nouveau_channel_del(struct nouveau_channel **pchan) nvif_object_fini(&chan->nvsw); nvif_object_fini(&chan->gart); nvif_object_fini(&chan->vram); + nvif_notify_fini(&chan->kill); nvif_object_fini(&chan->user); nvif_object_fini(&chan->push.ctxdma); nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); @@ -107,13 +118,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device, chan->device = device; chan->drm = drm; + atomic_set(&chan->killed, 0); /* allocate memory for dma push buffer */ target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED; if (nouveau_vram_pushbuf) target = TTM_PL_FLAG_VRAM; - ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL, + ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL, &chan->push.buffer); 
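The nouveau_chan.c hunk just above shows the knock-on effect of the buffer-object rework: nouveau_bo_new() is now keyed to a nouveau_cli rather than a drm_device and takes a u64 size, which is also why the earlier nouveau_bo.c hunks introduce roundup_64() (do_div() keeps the round-up safe on 32-bit builds, where plain 64-bit division and modulo are unavailable). A sketch of an updated call site, assuming a client pointer and an already page-aligned u64 size are in scope:

    struct nouveau_bo *nvbo = NULL;
    int ret;

    /* Client-scoped allocation; sizes beyond INT_MAX are now representable. */
    ret = nouveau_bo_new(cli, size, 0, TTM_PL_FLAG_TT, 0, 0, NULL, NULL, &nvbo);
    if (ret)
            return ret;
    ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
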
if (ret == 0) { ret = nouveau_bo_pin(chan->push.buffer, target, false); @@ -301,12 +313,26 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart) { struct nvif_device *device = chan->device; struct nouveau_cli *cli = (void *)chan->user.client; + struct nouveau_drm *drm = chan->drm; struct nvkm_mmu *mmu = nvxx_mmu(device); struct nv_dma_v0 args = {}; int ret, i; nvif_object_map(&chan->user); + if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) { + ret = nvif_notify_init(&chan->user, nouveau_channel_killed, + true, NV906F_V0_NTFY_KILLED, + NULL, 0, 0, &chan->kill); + if (ret == 0) + ret = nvif_notify_get(&chan->kill); + if (ret) { + NV_ERROR(drm, "Failed to request channel kill " + "notification: %d\n", ret); + return ret; + } + } + /* allocate dma objects to cover all allowed vram, and gart */ if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) { diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h index 48062c94f36d..46b947ba1cf4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.h +++ b/drivers/gpu/drm/nouveau/nouveau_chan.h @@ -1,7 +1,7 @@ #ifndef __NOUVEAU_CHAN_H__ #define __NOUVEAU_CHAN_H__ - #include <nvif/object.h> +#include <nvif/notify.h> struct nvif_device; struct nouveau_channel { @@ -38,6 +38,9 @@ struct nouveau_channel { u32 user_put; struct nvif_object user; + + struct nvif_notify kill; + atomic_t killed; }; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 966d20ab4de4..f5add64c093f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -419,7 +419,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_gpio *gpio = nvxx_gpio(&drm->device); + struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device); struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; int i, panel = -ENODEV; @@ -521,7 +521,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, return; nv_connector->detected_encoder = nv_encoder; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { connector->interlace_allowed = true; connector->doublescan_allowed = true; } else @@ -531,8 +531,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector, connector->interlace_allowed = false; } else { connector->doublescan_allowed = true; - if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN || - (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && + if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN || + (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && (dev->pdev->device & 0x0ff0) != 0x0100 && (dev->pdev->device & 0x0ff0) != 0x0150)) /* HW is broken */ @@ -984,17 +984,17 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi) /* Note: these limits are conservative, some Fermi's * can do 297 MHz. Unclear how this can be determined. 
*/ - if (drm->device.info.family >= NV_DEVICE_INFO_V0_KEPLER) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KEPLER) return 297000; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI) + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI) return 225000; } if (dcb->location != DCB_LOC_ON_CHIP || - drm->device.info.chipset >= 0x46) + drm->client.device.info.chipset >= 0x46) return 165000; - else if (drm->device.info.chipset >= 0x40) + else if (drm->client.device.info.chipset >= 0x40) return 155000; - else if (drm->device.info.chipset >= 0x18) + else if (drm->client.device.info.chipset >= 0x18) return 135000; else return 112000; @@ -1041,7 +1041,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector, clock = clock * (connector->display_info.bpc * 3) / 10; break; default: - BUG_ON(1); + BUG(); return MODE_BAD; } diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 411c12cdb249..fd64dfdc7d4f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -259,8 +259,9 @@ nouveau_debugfs_init(struct nouveau_drm *drm) if (!drm->debugfs) return -ENOMEM; - ret = nvif_object_init(&drm->device.object, 0, NVIF_CLASS_CONTROL, - NULL, 0, &drm->debugfs->ctrl); + ret = nvif_object_init(&drm->client.device.object, 0, + NVIF_CLASS_CONTROL, NULL, 0, + &drm->debugfs->ctrl); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 6b570079d185..72fdba1a1c5d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -414,7 +414,8 @@ nouveau_display_init(struct drm_device *dev) return ret; /* enable polling for external displays */ - drm_kms_helper_poll_enable(dev); + if (!dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); /* enable hotplug interrupts */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -495,7 +496,7 @@ int nouveau_display_create(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nvkm_device *device = nvxx_device(&drm->device); + struct nvkm_device *device = nvxx_device(&drm->client.device); struct nouveau_display *disp; int ret; @@ -512,15 +513,15 @@ nouveau_display_create(struct drm_device *dev) dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; - if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) { + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; } else - if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; } else - if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) { + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) { dev->mode_config.max_width = 8192; dev->mode_config.max_height = 8192; } else { @@ -531,7 +532,7 @@ nouveau_display_create(struct drm_device *dev) dev->mode_config.preferred_depth = 24; dev->mode_config.prefer_shadow = 1; - if (drm->device.info.chipset < 0x11) + if (drm->client.device.info.chipset < 0x11) dev->mode_config.async_page_flip = false; else dev->mode_config.async_page_flip = true; @@ -558,7 +559,7 @@ nouveau_display_create(struct drm_device *dev) int i; for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) { - ret = nvif_object_init(&drm->device.object, 0, + ret = 
nvif_object_init(&drm->client.device.object, 0, oclass[i], NULL, 0, &disp->disp); } @@ -1057,6 +1058,7 @@ int nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) { + struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_bo *bo; uint32_t domain; int ret; @@ -1066,12 +1068,12 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, args->size = roundup(args->size, PAGE_SIZE); /* Use VRAM if there is any; otherwise fall back to system memory */ - if (nouveau_drm(dev)->device.info.ram_size != 0) + if (nouveau_drm(dev)->client.device.info.ram_size != 0) domain = NOUVEAU_GEM_DOMAIN_VRAM; else domain = NOUVEAU_GEM_DOMAIN_GART; - ret = nouveau_gem_new(dev, args->size, 0, domain, 0, 0, &bo); + ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo); if (ret) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 279497b15e7b..d234a3b70bad 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -37,6 +37,8 @@ #include <core/pci.h> #include <core/tegra.h> +#include <nvif/driver.h> + #include <nvif/class.h> #include <nvif/cl0002.h> #include <nvif/cla06f.h> @@ -109,35 +111,53 @@ nouveau_name(struct drm_device *dev) return nouveau_platform_name(dev->platformdev); } +static void +nouveau_cli_fini(struct nouveau_cli *cli) +{ + nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); + usif_client_fini(cli); + nvif_device_fini(&cli->device); + nvif_client_fini(&cli->base); +} + static int -nouveau_cli_create(struct drm_device *dev, const char *sname, - int size, void **pcli) +nouveau_cli_init(struct nouveau_drm *drm, const char *sname, + struct nouveau_cli *cli) { - struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL); + u64 device = nouveau_name(drm->dev); int ret; - if (cli) { - snprintf(cli->name, sizeof(cli->name), "%s", sname); - cli->dev = dev; - ret = nvif_client_init(NULL, cli->name, nouveau_name(dev), - nouveau_config, nouveau_debug, + snprintf(cli->name, sizeof(cli->name), "%s", sname); + cli->dev = drm->dev; + mutex_init(&cli->mutex); + usif_client_init(cli); + + if (cli == &drm->client) { + ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug, + cli->name, device, &cli->base); + } else { + ret = nvif_client_init(&drm->client.base, cli->name, device, &cli->base); - if (ret == 0) { - mutex_init(&cli->mutex); - usif_client_init(cli); - } - return ret; } - return -ENOMEM; -} + if (ret) { + NV_ERROR(drm, "Client allocation failed: %d\n", ret); + goto done; + } -static void -nouveau_cli_destroy(struct nouveau_cli *cli) -{ - nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); - nvif_client_fini(&cli->base); - usif_client_fini(cli); - kfree(cli); + ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE, + &(struct nv_device_v0) { + .device = ~0, + }, sizeof(struct nv_device_v0), + &cli->device); + if (ret) { + NV_ERROR(drm, "Device allocation failed: %d\n", ret); + goto done; + } + +done: + if (ret) + nouveau_cli_fini(cli); + return ret; } static void @@ -161,7 +181,7 @@ nouveau_accel_fini(struct nouveau_drm *drm) static void nouveau_accel_init(struct nouveau_drm *drm) { - struct nvif_device *device = &drm->device; + struct nvif_device *device = &drm->client.device; struct nvif_sclass *sclass; u32 arg0, arg1; int ret, i, n; @@ -215,7 +235,7 @@ nouveau_accel_init(struct nouveau_drm *drm) } if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) { - ret = nouveau_channel_new(drm, &drm->device, + 
ret = nouveau_channel_new(drm, &drm->client.device, NVA06F_V0_ENGINE_CE0 | NVA06F_V0_ENGINE_CE1, 0, &drm->cechan); @@ -228,7 +248,7 @@ nouveau_accel_init(struct nouveau_drm *drm) if (device->info.chipset >= 0xa3 && device->info.chipset != 0xaa && device->info.chipset != 0xac) { - ret = nouveau_channel_new(drm, &drm->device, + ret = nouveau_channel_new(drm, &drm->client.device, NvDmaFB, NvDmaTT, &drm->cechan); if (ret) NV_ERROR(drm, "failed to create ce channel, %d\n", ret); @@ -240,7 +260,8 @@ nouveau_accel_init(struct nouveau_drm *drm) arg1 = NvDmaTT; } - ret = nouveau_channel_new(drm, &drm->device, arg0, arg1, &drm->channel); + ret = nouveau_channel_new(drm, &drm->client.device, + arg0, arg1, &drm->channel); if (ret) { NV_ERROR(drm, "failed to create kernel channel, %d\n", ret); nouveau_accel_fini(drm); @@ -280,8 +301,8 @@ nouveau_accel_init(struct nouveau_drm *drm) } if (device->info.family < NV_DEVICE_INFO_V0_FERMI) { - ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false, - NULL, &drm->notify); + ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0, + false, NULL, &drm->notify); if (ret) { NV_ERROR(drm, "failed to allocate notifier, %d\n", ret); nouveau_accel_fini(drm); @@ -407,12 +428,17 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) struct nouveau_drm *drm; int ret; - ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm); + if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL))) + return -ENOMEM; + dev->dev_private = drm; + drm->dev = dev; + + ret = nouveau_cli_init(drm, "DRM", &drm->client); if (ret) return ret; - dev->dev_private = drm; - drm->dev = dev; + dev->irq_enabled = true; + nvxx_client(&drm->client.base)->debug = nvkm_dbgopt(nouveau_debug, "DRM"); @@ -421,33 +447,24 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) nouveau_get_hdmi_dev(drm); - ret = nvif_device_init(&drm->client.base.object, 0, NV_DEVICE, - &(struct nv_device_v0) { - .device = ~0, - }, sizeof(struct nv_device_v0), - &drm->device); - if (ret) - goto fail_device; - - dev->irq_enabled = true; - /* workaround an odd issue on nvc1 by disabling the device's * nosnoop capability. hopefully won't cause issues until a * better fix is found - assuming there is one... 
*/ - if (drm->device.info.chipset == 0xc1) - nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000); + if (drm->client.device.info.chipset == 0xc1) + nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000); nouveau_vga_init(drm); - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - if (!nvxx_device(&drm->device)->mmu) { + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + if (!nvxx_device(&drm->client.device)->mmu) { ret = -ENOSYS; goto fail_device; } - ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), - 0x1000, NULL, &drm->client.vm); + ret = nvkm_vm_new(nvxx_device(&drm->client.device), + 0, (1ULL << 40), 0x1000, NULL, + &drm->client.vm); if (ret) goto fail_device; @@ -497,8 +514,8 @@ fail_bios: fail_ttm: nouveau_vga_fini(drm); fail_device: - nvif_device_fini(&drm->device); - nouveau_cli_destroy(&drm->client); + nouveau_cli_fini(&drm->client); + kfree(drm); return ret; } @@ -527,10 +544,10 @@ nouveau_drm_unload(struct drm_device *dev) nouveau_ttm_fini(drm); nouveau_vga_fini(drm); - nvif_device_fini(&drm->device); if (drm->hdmi_device) pci_dev_put(drm->hdmi_device); - nouveau_cli_destroy(&drm->client); + nouveau_cli_fini(&drm->client); + kfree(drm); } void @@ -560,7 +577,6 @@ static int nouveau_do_suspend(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_cli *cli; int ret; nouveau_led_suspend(dev); @@ -590,7 +606,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) goto fail_display; } - NV_INFO(drm, "suspending client object trees...\n"); + NV_INFO(drm, "suspending fence...\n"); if (drm->fence && nouveau_fence(drm)->suspend) { if (!nouveau_fence(drm)->suspend(drm)) { ret = -ENOMEM; @@ -598,13 +614,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) } } - list_for_each_entry(cli, &drm->clients, head) { - ret = nvif_client_suspend(&cli->base); - if (ret) - goto fail_client; - } - - NV_INFO(drm, "suspending kernel object tree...\n"); + NV_INFO(drm, "suspending object tree...\n"); ret = nvif_client_suspend(&drm->client.base); if (ret) goto fail_client; @@ -612,10 +622,6 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) return 0; fail_client: - list_for_each_entry_continue_reverse(cli, &drm->clients, head) { - nvif_client_resume(&cli->base); - } - if (drm->fence && nouveau_fence(drm)->resume) nouveau_fence(drm)->resume(drm); @@ -631,19 +637,14 @@ static int nouveau_do_resume(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); - struct nouveau_cli *cli; - NV_INFO(drm, "resuming kernel object tree...\n"); + NV_INFO(drm, "resuming object tree...\n"); nvif_client_resume(&drm->client.base); - NV_INFO(drm, "resuming client object trees...\n"); + NV_INFO(drm, "resuming fence...\n"); if (drm->fence && nouveau_fence(drm)->resume) nouveau_fence(drm)->resume(drm); - list_for_each_entry(cli, &drm->clients, head) { - nvif_client_resume(&cli->base); - } - nouveau_run_vbios_init(dev); if (dev->mode_config.num_crtc) { @@ -758,7 +759,7 @@ nouveau_pmops_runtime_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); - struct nvif_device *device = &nouveau_drm(drm_dev)->device; + struct nvif_device *device = &nouveau_drm(drm_dev)->client.device; int ret; if (nouveau_runtime_pm == 0) @@ -772,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev) pci_set_master(pdev); ret = nouveau_do_resume(drm_dev, true); - drm_kms_helper_poll_enable(drm_dev); + + if 
(!drm_dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(drm_dev); + /* do magic */ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); @@ -841,20 +845,20 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) get_task_comm(tmpname, current); snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid)); - ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli); + if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) + return ret; + ret = nouveau_cli_init(drm, name, cli); if (ret) - goto out_suspend; + goto done; cli->base.super = false; - if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { - ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), - 0x1000, NULL, &cli->vm); - if (ret) { - nouveau_cli_destroy(cli); - goto out_suspend; - } + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0, + (1ULL << 40), 0x1000, NULL, &cli->vm); + if (ret) + goto done; nvxx_client(&cli->base)->vm = cli->vm; } @@ -865,10 +869,14 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) list_add(&cli->head, &drm->clients); mutex_unlock(&drm->client.mutex); -out_suspend: +done: + if (ret && cli) { + nouveau_cli_fini(cli); + kfree(cli); + } + pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); - return ret; } @@ -895,7 +903,8 @@ static void nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv) { struct nouveau_cli *cli = nouveau_cli(fpriv); - nouveau_cli_destroy(cli); + nouveau_cli_fini(cli); + kfree(cli); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 8d5ed5bfdacb..eadec2f49ad3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -86,14 +86,17 @@ enum nouveau_drm_handle { struct nouveau_cli { struct nvif_client base; + struct drm_device *dev; + struct mutex mutex; + + struct nvif_device device; + struct nvkm_vm *vm; /*XXX*/ struct list_head head; - struct mutex mutex; void *abi16; struct list_head objects; struct list_head notifys; char name[32]; - struct drm_device *dev; }; static inline struct nouveau_cli * @@ -111,7 +114,6 @@ struct nouveau_drm { struct nouveau_cli client; struct drm_device *dev; - struct nvif_device device; struct list_head clients; struct { @@ -165,6 +167,8 @@ struct nouveau_drm { struct backlight_device *backlight; struct list_head bl_connectors; struct work_struct hpd_work; + struct work_struct fbcon_work; + int fbcon_new_state; #ifdef CONFIG_ACPI struct notifier_block acpi_nb; #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index a9d48100e74f..2665a078b6da 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -60,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); - struct nvif_device *device = &drm->device; + struct nvif_device *device = &drm->client.device; int ret; if (info->state != FBINFO_STATE_RUNNING) @@ -92,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image) { struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); - struct nvif_device *device = &drm->device; + struct 
nvif_device *device = &drm->client.device; int ret; if (info->state != FBINFO_STATE_RUNNING) @@ -124,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) { struct nouveau_fbdev *fbcon = info->par; struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev); - struct nvif_device *device = &drm->device; + struct nvif_device *device = &drm->client.device; int ret; if (info->state != FBINFO_STATE_RUNNING) @@ -266,10 +266,10 @@ nouveau_fbcon_accel_init(struct drm_device *dev) struct fb_info *info = fbcon->helper.fbdev; int ret; - if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) ret = nv04_fbcon_accel_init(info); else - if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) + if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) ret = nv50_fbcon_accel_init(info); else ret = nvc0_fbcon_accel_init(info); @@ -324,7 +324,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, container_of(helper, struct nouveau_fbdev, helper); struct drm_device *dev = fbcon->helper.dev; struct nouveau_drm *drm = nouveau_drm(dev); - struct nvif_device *device = &drm->device; + struct nvif_device *device = &drm->client.device; struct fb_info *info; struct nouveau_framebuffer *fb; struct nouveau_channel *chan; @@ -341,8 +341,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); - ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height, - 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo); + ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] * + mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM, + 0, 0x0000, &nvbo); if (ret) { NV_ERROR(drm, "failed to allocate framebuffer\n"); goto out; @@ -471,19 +472,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { .fb_probe = nouveau_fbcon_create, }; +static void +nouveau_fbcon_set_suspend_work(struct work_struct *work) +{ + struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); + int state = READ_ONCE(drm->fbcon_new_state); + + if (state == FBINFO_STATE_RUNNING) + pm_runtime_get_sync(drm->dev->dev); + + console_lock(); + if (state == FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_restore(drm->dev); + drm_fb_helper_set_suspend(&drm->fbcon->helper, state); + if (state != FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_save_disable(drm->dev); + console_unlock(); + + if (state == FBINFO_STATE_RUNNING) { + pm_runtime_mark_last_busy(drm->dev->dev); + pm_runtime_put_sync(drm->dev->dev); + } +} + void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) { struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->fbcon) { - console_lock(); - if (state == FBINFO_STATE_RUNNING) - nouveau_fbcon_accel_restore(dev); - drm_fb_helper_set_suspend(&drm->fbcon->helper, state); - if (state != FBINFO_STATE_RUNNING) - nouveau_fbcon_accel_save_disable(dev); - console_unlock(); - } + + if (!drm->fbcon) + return; + + drm->fbcon_new_state = state; + /* Since runtime resume can happen as a result of a sysfs operation, * it's possible we already have the console locked. So handle fbcon + init/deinit from a separate work thread */ + schedule_work(&drm->fbcon_work); }
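The reworked nouveau_fbcon_set_suspend() above exists because, as its comment notes, runtime resume can be triggered from a sysfs write while the console lock is already held, so taking console_lock() synchronously could deadlock; the caller therefore only records the requested state and defers the real work to fbcon_work. A reduced sketch of the pattern, trimmed of the pm_runtime and accel save/restore steps the driver also performs, assuming only the fields this patch adds and the includes already used by nouveau_fbcon.c:

	static void example_fbcon_worker(struct work_struct *work)
	{
		struct nouveau_drm *drm =
			container_of(work, typeof(*drm), fbcon_work);
		int state = READ_ONCE(drm->fbcon_new_state);

		/* Blocking on console_lock() is safe here: the worker runs
		 * in plain process context, not under the sysfs-initiated
		 * resume path that may already hold the lock.
		 */
		console_lock();
		drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
		console_unlock();
	}

READ_ONCE() keeps the compiler from caching or tearing the load of fbcon_new_state, so a re-scheduled worker always acts on the most recently requested state.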
int @@ -503,6 +528,7 @@ nouveau_fbcon_init(struct drm_device *dev) return -ENOMEM; drm->fbcon = fbcon; + INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); @@ -514,10 +540,10 @@ nouveau_fbcon_init(struct drm_device *dev) if (ret) goto fini; - if (drm->device.info.ram_size <= 32 * 1024 * 1024) + if (drm->client.device.info.ram_size <= 32 * 1024 * 1024) preferred_bpp = 8; else - if (drm->device.info.ram_size <= 64 * 1024 * 1024) + if (drm->client.device.info.ram_size <= 64 * 1024 * 1024) preferred_bpp = 16; else preferred_bpp = 32; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index a6126c93f215..f3e551f1aa46 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -190,7 +190,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha return; ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler, - false, G82_CHANNEL_DMA_V0_NTFY_UEVENT, + false, NV826E_V0_NTFY_NON_STALL_INTERRUPT, &(struct nvif_notify_uevent_req) { }, sizeof(struct nvif_notify_uevent_req), sizeof(struct nvif_notify_uevent_rep), diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index ccdce1b4eec4..d5e58a38f160 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -99,6 +99,7 @@ struct nv84_fence_priv { struct nouveau_bo *bo; struct nouveau_bo *bo_gart; u32 *suspend; + struct mutex mutex; }; int nv84_fence_context_new(struct nouveau_channel *); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 201b52b750dd..ca5397beb357 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -175,11 +175,11 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) } int -nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, +nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain, uint32_t tile_mode, uint32_t tile_flags, struct nouveau_bo **pnvbo) { - struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_drm *drm = nouveau_drm(cli->dev); struct nouveau_bo *nvbo; u32 flags = 0; int ret; @@ -194,7 +194,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) flags |= TTM_PL_FLAG_UNCACHED; - ret = nouveau_bo_new(dev, size, align, flags, tile_mode, + ret =
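The final hunk, cut off above, makes the matching change on the GEM side: nouveau_gem_new() now also takes the nouveau_cli and a u64 size, recovers the drm device through cli->dev, and forwards to the client-based nouveau_bo_new(). A sketch of a caller in the new style, modelled on the dumb-buffer path earlier in this diff; the helper name is illustrative and the nouveau_drv.h/nouveau_gem.h declarations shown above are assumed:

	static int
	example_gem_alloc(struct nouveau_cli *cli, u64 size,
			  struct nouveau_bo **pnvbo)
	{
		u32 domain = NOUVEAU_GEM_DOMAIN_VRAM;

		/* Use VRAM if the device has any; otherwise fall back
		 * to system memory (GART), as dumb_create() does above.
		 */
		if (nouveau_drm(cli->dev)->client.device.info.ram_size == 0)
			domain = NOUVEAU_GEM_DOMAIN_GART;

		return nouveau_gem_new(cli, roundup(size, PAGE_SIZE), 0,
				       domain, 0, 0x0000, pnvbo);
	}

Passing the cli everywhere keeps both allocation paths consistent with the per-client restructuring in nouveau_drm.c above.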